code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1 value |
|---|---|---|---|---|---|
package storetest
import (
"database/sql"
"errors"
"testing"
"github.com/mattermost/mattermost-server/v5/model"
"github.com/mattermost/mattermost-server/v5/store"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestChannelStoreCategories runs the full sidebar-category test suite
// against the supplied store implementation and SQL supplier.
func TestChannelStoreCategories(t *testing.T, ss store.Store, s SqlSupplier) {
	// Table of subtests; order matches the original suite.
	subtests := []struct {
		name string
		fn   func(*testing.T)
	}{
		{"CreateInitialSidebarCategories", func(t *testing.T) { testCreateInitialSidebarCategories(t, ss) }},
		{"CreateSidebarCategory", func(t *testing.T) { testCreateSidebarCategory(t, ss) }},
		{"GetSidebarCategory", func(t *testing.T) { testGetSidebarCategory(t, ss, s) }},
		{"GetSidebarCategories", func(t *testing.T) { testGetSidebarCategories(t, ss) }},
		{"UpdateSidebarCategories", func(t *testing.T) { testUpdateSidebarCategories(t, ss, s) }},
		{"DeleteSidebarCategory", func(t *testing.T) { testDeleteSidebarCategory(t, ss, s) }},
	}
	for _, st := range subtests {
		t.Run(st.name, st.fn)
	}
}
// testCreateInitialSidebarCategories verifies CreateInitialSidebarCategories:
// it must create the three built-in categories (Favorites, Channels, Direct
// Messages) in that fixed order for a given user/team pair, must be safe to
// call repeatedly and for multiple users/teams, and must seed the Favorites
// category from the user's existing favorite-channel preferences (but only
// with channels visible on the given team, plus DMs/GMs which are team-less).
func testCreateInitialSidebarCategories(t *testing.T, ss store.Store) {
	t.Run("should create initial favorites/channels/DMs categories", func(t *testing.T) {
		userId := model.NewId()
		teamId := model.NewId()
		nErr := ss.Channel().CreateInitialSidebarCategories(userId, teamId)
		assert.Nil(t, nErr)
		// The three built-in categories must come back in this fixed order.
		res, err := ss.Channel().GetSidebarCategories(userId, teamId)
		assert.Nil(t, err)
		assert.Len(t, res.Categories, 3)
		assert.Equal(t, model.SidebarCategoryFavorites, res.Categories[0].Type)
		assert.Equal(t, model.SidebarCategoryChannels, res.Categories[1].Type)
		assert.Equal(t, model.SidebarCategoryDirectMessages, res.Categories[2].Type)
	})
	t.Run("should create initial favorites/channels/DMs categories for multiple users", func(t *testing.T) {
		userId := model.NewId()
		teamId := model.NewId()
		nErr := ss.Channel().CreateInitialSidebarCategories(userId, teamId)
		require.Nil(t, nErr)
		// A second user on the same team gets their own independent set.
		userId2 := model.NewId()
		nErr = ss.Channel().CreateInitialSidebarCategories(userId2, teamId)
		assert.Nil(t, nErr)
		res, err := ss.Channel().GetSidebarCategories(userId2, teamId)
		assert.Nil(t, err)
		assert.Len(t, res.Categories, 3)
		assert.Equal(t, model.SidebarCategoryFavorites, res.Categories[0].Type)
		assert.Equal(t, model.SidebarCategoryChannels, res.Categories[1].Type)
		assert.Equal(t, model.SidebarCategoryDirectMessages, res.Categories[2].Type)
	})
	t.Run("should create initial favorites/channels/DMs categories on different teams", func(t *testing.T) {
		userId := model.NewId()
		teamId := model.NewId()
		nErr := ss.Channel().CreateInitialSidebarCategories(userId, teamId)
		require.Nil(t, nErr)
		// The same user on a second team gets a separate set of categories.
		teamId2 := model.NewId()
		nErr = ss.Channel().CreateInitialSidebarCategories(userId, teamId2)
		assert.Nil(t, nErr)
		res, err := ss.Channel().GetSidebarCategories(userId, teamId2)
		assert.Nil(t, err)
		assert.Len(t, res.Categories, 3)
		assert.Equal(t, model.SidebarCategoryFavorites, res.Categories[0].Type)
		assert.Equal(t, model.SidebarCategoryChannels, res.Categories[1].Type)
		assert.Equal(t, model.SidebarCategoryDirectMessages, res.Categories[2].Type)
	})
	t.Run("shouldn't create additional categories when ones already exist", func(t *testing.T) {
		userId := model.NewId()
		teamId := model.NewId()
		nErr := ss.Channel().CreateInitialSidebarCategories(userId, teamId)
		require.Nil(t, nErr)
		initialCategories, err := ss.Channel().GetSidebarCategories(userId, teamId)
		require.Nil(t, err)
		// Calling CreateInitialSidebarCategories a second time shouldn't create any new categories
		nErr = ss.Channel().CreateInitialSidebarCategories(userId, teamId)
		assert.Nil(t, nErr)
		// The result must be identical to the first read (idempotence).
		res, err := ss.Channel().GetSidebarCategories(userId, teamId)
		assert.Nil(t, err)
		assert.Equal(t, initialCategories.Categories, res.Categories)
	})
	t.Run("should populate the Favorites category with regular channels", func(t *testing.T) {
		userId := model.NewId()
		teamId := model.NewId()
		// Set up two channels, one favorited and one not
		channel1, nErr := ss.Channel().Save(&model.Channel{
			TeamId: teamId,
			Type:   model.CHANNEL_OPEN,
			Name:   "channel1",
		}, 1000)
		require.Nil(t, nErr)
		_, err := ss.Channel().SaveMember(&model.ChannelMember{
			ChannelId:   channel1.Id,
			UserId:      userId,
			NotifyProps: model.GetDefaultChannelNotifyProps(),
		})
		require.Nil(t, err)
		channel2, nErr := ss.Channel().Save(&model.Channel{
			TeamId: teamId,
			Type:   model.CHANNEL_OPEN,
			Name:   "channel2",
		}, 1000)
		require.Nil(t, nErr)
		_, err = ss.Channel().SaveMember(&model.ChannelMember{
			ChannelId:   channel2.Id,
			UserId:      userId,
			NotifyProps: model.GetDefaultChannelNotifyProps(),
		})
		require.Nil(t, err)
		// Mark channel1 as a favorite via the legacy preferences table.
		nErr = ss.Preference().Save(&model.Preferences{
			{
				UserId:   userId,
				Category: model.PREFERENCE_CATEGORY_FAVORITE_CHANNEL,
				Name:     channel1.Id,
				Value:    "true",
			},
		})
		require.Nil(t, nErr)
		// Create the categories
		nErr = ss.Channel().CreateInitialSidebarCategories(userId, teamId)
		require.Nil(t, nErr)
		// Get and check the categories for channels: the favorited channel
		// lands in Favorites, the other in Channels.
		categories, err := ss.Channel().GetSidebarCategories(userId, teamId)
		require.Nil(t, err)
		require.Len(t, categories.Categories, 3)
		assert.Equal(t, model.SidebarCategoryFavorites, categories.Categories[0].Type)
		assert.Equal(t, []string{channel1.Id}, categories.Categories[0].Channels)
		assert.Equal(t, model.SidebarCategoryChannels, categories.Categories[1].Type)
		assert.Equal(t, []string{channel2.Id}, categories.Categories[1].Channels)
	})
	t.Run("should populate the Favorites category in alphabetical order", func(t *testing.T) {
		userId := model.NewId()
		teamId := model.NewId()
		// Set up two channels whose display names sort opposite to their
		// creation order ("zebra" created before "aardvark").
		channel1, nErr := ss.Channel().Save(&model.Channel{
			TeamId:      teamId,
			Type:        model.CHANNEL_OPEN,
			Name:        "channel1",
			DisplayName: "zebra",
		}, 1000)
		require.Nil(t, nErr)
		_, err := ss.Channel().SaveMember(&model.ChannelMember{
			ChannelId:   channel1.Id,
			UserId:      userId,
			NotifyProps: model.GetDefaultChannelNotifyProps(),
		})
		require.Nil(t, err)
		channel2, nErr := ss.Channel().Save(&model.Channel{
			TeamId:      teamId,
			Type:        model.CHANNEL_OPEN,
			Name:        "channel2",
			DisplayName: "aardvark",
		}, 1000)
		require.Nil(t, nErr)
		_, err = ss.Channel().SaveMember(&model.ChannelMember{
			ChannelId:   channel2.Id,
			UserId:      userId,
			NotifyProps: model.GetDefaultChannelNotifyProps(),
		})
		require.Nil(t, err)
		// Favorite both channels.
		nErr = ss.Preference().Save(&model.Preferences{
			{
				UserId:   userId,
				Category: model.PREFERENCE_CATEGORY_FAVORITE_CHANNEL,
				Name:     channel1.Id,
				Value:    "true",
			},
			{
				UserId:   userId,
				Category: model.PREFERENCE_CATEGORY_FAVORITE_CHANNEL,
				Name:     channel2.Id,
				Value:    "true",
			},
		})
		require.Nil(t, nErr)
		// Create the categories
		nErr = ss.Channel().CreateInitialSidebarCategories(userId, teamId)
		require.Nil(t, nErr)
		// Get and check the categories for channels: "aardvark" (channel2)
		// must come before "zebra" (channel1).
		categories, err := ss.Channel().GetSidebarCategories(userId, teamId)
		require.Nil(t, err)
		require.Len(t, categories.Categories, 3)
		assert.Equal(t, model.SidebarCategoryFavorites, categories.Categories[0].Type)
		assert.Equal(t, []string{channel2.Id, channel1.Id}, categories.Categories[0].Channels)
	})
	t.Run("should populate the Favorites category with DMs and GMs", func(t *testing.T) {
		userId := model.NewId()
		teamId := model.NewId()
		otherUserId1 := model.NewId()
		otherUserId2 := model.NewId()
		// Set up two direct channels, one favorited and one not
		dmChannel1, err := ss.Channel().SaveDirectChannel(
			&model.Channel{
				Name: model.GetDMNameFromIds(userId, otherUserId1),
				Type: model.CHANNEL_DIRECT,
			},
			&model.ChannelMember{
				UserId:      userId,
				NotifyProps: model.GetDefaultChannelNotifyProps(),
			},
			&model.ChannelMember{
				UserId:      otherUserId1,
				NotifyProps: model.GetDefaultChannelNotifyProps(),
			},
		)
		require.Nil(t, err)
		dmChannel2, err := ss.Channel().SaveDirectChannel(
			&model.Channel{
				Name: model.GetDMNameFromIds(userId, otherUserId2),
				Type: model.CHANNEL_DIRECT,
			},
			&model.ChannelMember{
				UserId:      userId,
				NotifyProps: model.GetDefaultChannelNotifyProps(),
			},
			&model.ChannelMember{
				UserId:      otherUserId2,
				NotifyProps: model.GetDefaultChannelNotifyProps(),
			},
		)
		require.Nil(t, err)
		// Favorite only the first DM.
		err = ss.Preference().Save(&model.Preferences{
			{
				UserId:   userId,
				Category: model.PREFERENCE_CATEGORY_FAVORITE_CHANNEL,
				Name:     dmChannel1.Id,
				Value:    "true",
			},
		})
		require.Nil(t, err)
		// Create the categories
		nErr := ss.Channel().CreateInitialSidebarCategories(userId, teamId)
		require.Nil(t, nErr)
		// Get and check the categories for channels: the favorited DM goes
		// to Favorites, the other stays in Direct Messages.
		categories, err := ss.Channel().GetSidebarCategories(userId, teamId)
		require.Nil(t, err)
		require.Len(t, categories.Categories, 3)
		assert.Equal(t, model.SidebarCategoryFavorites, categories.Categories[0].Type)
		assert.Equal(t, []string{dmChannel1.Id}, categories.Categories[0].Channels)
		assert.Equal(t, model.SidebarCategoryDirectMessages, categories.Categories[2].Type)
		assert.Equal(t, []string{dmChannel2.Id}, categories.Categories[2].Channels)
	})
	t.Run("should not populate the Favorites category with channels from other teams", func(t *testing.T) {
		userId := model.NewId()
		teamId := model.NewId()
		teamId2 := model.NewId()
		// Set up a channel on another team and favorite it
		channel1, nErr := ss.Channel().Save(&model.Channel{
			TeamId: teamId2,
			Type:   model.CHANNEL_OPEN,
			Name:   "channel1",
		}, 1000)
		require.Nil(t, nErr)
		_, err := ss.Channel().SaveMember(&model.ChannelMember{
			ChannelId:   channel1.Id,
			UserId:      userId,
			NotifyProps: model.GetDefaultChannelNotifyProps(),
		})
		require.Nil(t, err)
		nErr = ss.Preference().Save(&model.Preferences{
			{
				UserId:   userId,
				Category: model.PREFERENCE_CATEGORY_FAVORITE_CHANNEL,
				Name:     channel1.Id,
				Value:    "true",
			},
		})
		require.Nil(t, nErr)
		// Create the categories
		nErr = ss.Channel().CreateInitialSidebarCategories(userId, teamId)
		require.Nil(t, nErr)
		// Get and check the categories for channels: the other team's
		// favorite must not leak into this team's categories.
		categories, err := ss.Channel().GetSidebarCategories(userId, teamId)
		require.Nil(t, err)
		require.Len(t, categories.Categories, 3)
		assert.Equal(t, model.SidebarCategoryFavorites, categories.Categories[0].Type)
		assert.Equal(t, []string{}, categories.Categories[0].Channels)
		assert.Equal(t, model.SidebarCategoryChannels, categories.Categories[1].Type)
		assert.Equal(t, []string{}, categories.Categories[1].Channels)
	})
}
// testCreateSidebarCategory verifies CreateSidebarCategory: new custom
// categories are positioned relative to the Favorites category (second if
// Favorites is first, otherwise first), are created with their channel list
// in the given order, and steal channels from any categories that previously
// contained them.
func testCreateSidebarCategory(t *testing.T, ss store.Store) {
	t.Run("should place the new category second if Favorites comes first", func(t *testing.T) {
		userId := model.NewId()
		teamId := model.NewId()
		nErr := ss.Channel().CreateInitialSidebarCategories(userId, teamId)
		require.Nil(t, nErr)
		// Create the category
		created, err := ss.Channel().CreateSidebarCategory(userId, teamId, &model.SidebarCategoryWithChannels{
			SidebarCategory: model.SidebarCategory{
				DisplayName: model.NewId(),
			},
		})
		require.Nil(t, err)
		// Confirm that it comes second
		res, err := ss.Channel().GetSidebarCategories(userId, teamId)
		require.Nil(t, err)
		require.Len(t, res.Categories, 4)
		assert.Equal(t, model.SidebarCategoryFavorites, res.Categories[0].Type)
		assert.Equal(t, model.SidebarCategoryCustom, res.Categories[1].Type)
		assert.Equal(t, created.Id, res.Categories[1].Id)
	})
	t.Run("should place the new category first if Favorites is not first", func(t *testing.T) {
		userId := model.NewId()
		teamId := model.NewId()
		nErr := ss.Channel().CreateInitialSidebarCategories(userId, teamId)
		require.Nil(t, nErr)
		// Re-arrange the categories so that Favorites comes last
		categories, err := ss.Channel().GetSidebarCategories(userId, teamId)
		require.Nil(t, err)
		require.Len(t, categories.Categories, 3)
		require.Equal(t, model.SidebarCategoryFavorites, categories.Categories[0].Type)
		err = ss.Channel().UpdateSidebarCategoryOrder(userId, teamId, []string{
			categories.Categories[1].Id,
			categories.Categories[2].Id,
			categories.Categories[0].Id,
		})
		require.Nil(t, err)
		// Create the category
		created, err := ss.Channel().CreateSidebarCategory(userId, teamId, &model.SidebarCategoryWithChannels{
			SidebarCategory: model.SidebarCategory{
				DisplayName: model.NewId(),
			},
		})
		require.Nil(t, err)
		// Confirm that it comes first
		res, err := ss.Channel().GetSidebarCategories(userId, teamId)
		require.Nil(t, err)
		require.Len(t, res.Categories, 4)
		assert.Equal(t, model.SidebarCategoryCustom, res.Categories[0].Type)
		assert.Equal(t, created.Id, res.Categories[0].Id)
	})
	t.Run("should create the category with its channels", func(t *testing.T) {
		userId := model.NewId()
		teamId := model.NewId()
		nErr := ss.Channel().CreateInitialSidebarCategories(userId, teamId)
		require.Nil(t, nErr)
		// Create some channels
		channel1, err := ss.Channel().Save(&model.Channel{
			Type:   model.CHANNEL_OPEN,
			TeamId: teamId,
			Name:   model.NewId(),
		}, 100)
		require.Nil(t, err)
		channel2, err := ss.Channel().Save(&model.Channel{
			Type:   model.CHANNEL_OPEN,
			TeamId: teamId,
			Name:   model.NewId(),
		}, 100)
		require.Nil(t, err)
		// Create the category; the channel order given here must be kept.
		created, err := ss.Channel().CreateSidebarCategory(userId, teamId, &model.SidebarCategoryWithChannels{
			SidebarCategory: model.SidebarCategory{
				DisplayName: model.NewId(),
			},
			Channels: []string{channel2.Id, channel1.Id},
		})
		require.Nil(t, err)
		assert.Equal(t, []string{channel2.Id, channel1.Id}, created.Channels)
		// Get the channel again to ensure that the SidebarChannels were saved correctly
		res, err := ss.Channel().GetSidebarCategory(created.Id)
		require.Nil(t, err)
		assert.Equal(t, []string{channel2.Id, channel1.Id}, res.Channels)
	})
	t.Run("should remove any channels from their previous categories", func(t *testing.T) {
		userId := model.NewId()
		teamId := model.NewId()
		nErr := ss.Channel().CreateInitialSidebarCategories(userId, teamId)
		require.Nil(t, nErr)
		categories, err := ss.Channel().GetSidebarCategories(userId, teamId)
		require.Nil(t, err)
		require.Len(t, categories.Categories, 3)
		favoritesCategory := categories.Categories[0]
		require.Equal(t, model.SidebarCategoryFavorites, favoritesCategory.Type)
		channelsCategory := categories.Categories[1]
		require.Equal(t, model.SidebarCategoryChannels, channelsCategory.Type)
		// Create some channels
		channel1, nErr := ss.Channel().Save(&model.Channel{
			Type:   model.CHANNEL_OPEN,
			TeamId: teamId,
			Name:   model.NewId(),
		}, 100)
		require.Nil(t, nErr)
		channel2, nErr := ss.Channel().Save(&model.Channel{
			Type:   model.CHANNEL_OPEN,
			TeamId: teamId,
			Name:   model.NewId(),
		}, 100)
		require.Nil(t, nErr)
		// Assign them to categories
		favoritesCategory.Channels = []string{channel1.Id}
		channelsCategory.Channels = []string{channel2.Id}
		_, err = ss.Channel().UpdateSidebarCategories(userId, teamId, []*model.SidebarCategoryWithChannels{
			favoritesCategory,
			channelsCategory,
		})
		require.Nil(t, err)
		// Create the category containing both channels; this should pull
		// them out of Favorites and Channels.
		created, err := ss.Channel().CreateSidebarCategory(userId, teamId, &model.SidebarCategoryWithChannels{
			SidebarCategory: model.SidebarCategory{
				DisplayName: model.NewId(),
			},
			Channels: []string{channel2.Id, channel1.Id},
		})
		require.Nil(t, err)
		assert.Equal(t, []string{channel2.Id, channel1.Id}, created.Channels)
		// Confirm that the channels were removed from their original categories
		res, err := ss.Channel().GetSidebarCategory(favoritesCategory.Id)
		require.Nil(t, err)
		assert.Equal(t, []string{}, res.Channels)
		res, err = ss.Channel().GetSidebarCategory(channelsCategory.Id)
		require.Nil(t, err)
		assert.Equal(t, []string{}, res.Channels)
	})
}
// testGetSidebarCategory verifies GetSidebarCategory: it returns a single
// category with its Channels populated in order, and the built-in Channels
// and Direct Messages categories absorb "orphaned" channels (channels the
// user belongs to that are not explicitly assigned to any category) at read
// time, scoped to the category's team.
//
// Fixes from review: the three assertions that previously re-checked
// channelsCategory.Type (already pinned by an earlier require.Equal) now
// check res.Type, matching the pattern used by the other subtests; also
// fixed the "categorywhich" typo in the last subtest's name and the
// "DM"/"GM" mix-up in a comment.
func testGetSidebarCategory(t *testing.T, ss store.Store, s SqlSupplier) {
	t.Run("should return a custom category with its Channels field set", func(t *testing.T) {
		userId := model.NewId()
		teamId := model.NewId()
		channelId1 := model.NewId()
		channelId2 := model.NewId()
		channelId3 := model.NewId()
		nErr := ss.Channel().CreateInitialSidebarCategories(userId, teamId)
		require.Nil(t, nErr)
		// Create a category and assign some channels to it
		created, err := ss.Channel().CreateSidebarCategory(userId, teamId, &model.SidebarCategoryWithChannels{
			SidebarCategory: model.SidebarCategory{
				UserId:      userId,
				TeamId:      teamId,
				DisplayName: model.NewId(),
			},
			Channels: []string{channelId1, channelId2, channelId3},
		})
		require.Nil(t, err)
		require.NotNil(t, created)
		// Ensure that they're returned in order
		res, err := ss.Channel().GetSidebarCategory(created.Id)
		assert.Nil(t, err)
		assert.Equal(t, created.Id, res.Id)
		assert.Equal(t, model.SidebarCategoryCustom, res.Type)
		assert.Equal(t, created.DisplayName, res.DisplayName)
		assert.Equal(t, []string{channelId1, channelId2, channelId3}, res.Channels)
	})
	t.Run("should return any orphaned channels with the Channels category", func(t *testing.T) {
		userId := model.NewId()
		teamId := model.NewId()
		// Create the initial categories and find the channels category
		nErr := ss.Channel().CreateInitialSidebarCategories(userId, teamId)
		require.Nil(t, nErr)
		categories, err := ss.Channel().GetSidebarCategories(userId, teamId)
		require.Nil(t, err)
		channelsCategory := categories.Categories[1]
		require.Equal(t, model.SidebarCategoryChannels, channelsCategory.Type)
		// Join some channels
		channel1, nErr := ss.Channel().Save(&model.Channel{
			Name:        "channel1",
			DisplayName: "DEF",
			TeamId:      teamId,
			Type:        model.CHANNEL_PRIVATE,
		}, 10)
		require.Nil(t, nErr)
		_, err = ss.Channel().SaveMember(&model.ChannelMember{
			UserId:      userId,
			ChannelId:   channel1.Id,
			NotifyProps: model.GetDefaultChannelNotifyProps(),
		})
		require.Nil(t, err)
		channel2, nErr := ss.Channel().Save(&model.Channel{
			Name:        "channel2",
			DisplayName: "ABC",
			TeamId:      teamId,
			Type:        model.CHANNEL_OPEN,
		}, 10)
		require.Nil(t, nErr)
		_, err = ss.Channel().SaveMember(&model.ChannelMember{
			UserId:      userId,
			ChannelId:   channel2.Id,
			NotifyProps: model.GetDefaultChannelNotifyProps(),
		})
		require.Nil(t, err)
		// Confirm that they're not in the Channels category in the DB
		count, countErr := s.GetMaster().SelectInt(`
			SELECT
				COUNT(*)
			FROM
				SidebarChannels
			WHERE
				CategoryId = :CategoryId`, map[string]interface{}{"CategoryId": channelsCategory.Id})
		require.Nil(t, countErr)
		assert.Equal(t, int64(0), count)
		// Ensure that the Channels are returned in alphabetical order
		// ("ABC" before "DEF").
		res, err := ss.Channel().GetSidebarCategory(channelsCategory.Id)
		assert.Nil(t, err)
		assert.Equal(t, channelsCategory.Id, res.Id)
		assert.Equal(t, model.SidebarCategoryChannels, res.Type)
		assert.Equal(t, []string{channel2.Id, channel1.Id}, res.Channels)
	})
	t.Run("shouldn't return orphaned channels on another team with the Channels category", func(t *testing.T) {
		userId := model.NewId()
		teamId := model.NewId()
		// Create the initial categories and find the channels category
		nErr := ss.Channel().CreateInitialSidebarCategories(userId, teamId)
		require.Nil(t, nErr)
		categories, err := ss.Channel().GetSidebarCategories(userId, teamId)
		require.Nil(t, err)
		require.Equal(t, model.SidebarCategoryChannels, categories.Categories[1].Type)
		channelsCategory := categories.Categories[1]
		// Join a channel on another team
		channel1, nErr := ss.Channel().Save(&model.Channel{
			Name:   "abc",
			TeamId: model.NewId(),
			Type:   model.CHANNEL_OPEN,
		}, 10)
		require.Nil(t, nErr)
		_, err = ss.Channel().SaveMember(&model.ChannelMember{
			UserId:      userId,
			ChannelId:   channel1.Id,
			NotifyProps: model.GetDefaultChannelNotifyProps(),
		})
		require.Nil(t, err)
		// Ensure that no channels are returned
		res, err := ss.Channel().GetSidebarCategory(channelsCategory.Id)
		assert.Nil(t, err)
		assert.Equal(t, channelsCategory.Id, res.Id)
		assert.Equal(t, model.SidebarCategoryChannels, res.Type)
		assert.Len(t, res.Channels, 0)
	})
	t.Run("shouldn't return non-orphaned channels with the Channels category", func(t *testing.T) {
		userId := model.NewId()
		teamId := model.NewId()
		// Create the initial categories and find the channels category
		nErr := ss.Channel().CreateInitialSidebarCategories(userId, teamId)
		require.Nil(t, nErr)
		categories, err := ss.Channel().GetSidebarCategories(userId, teamId)
		require.Nil(t, err)
		favoritesCategory := categories.Categories[0]
		require.Equal(t, model.SidebarCategoryFavorites, favoritesCategory.Type)
		channelsCategory := categories.Categories[1]
		require.Equal(t, model.SidebarCategoryChannels, channelsCategory.Type)
		// Join some channels
		channel1, nErr := ss.Channel().Save(&model.Channel{
			Name:        "channel1",
			DisplayName: "DEF",
			TeamId:      teamId,
			Type:        model.CHANNEL_PRIVATE,
		}, 10)
		require.Nil(t, nErr)
		_, err = ss.Channel().SaveMember(&model.ChannelMember{
			UserId:      userId,
			ChannelId:   channel1.Id,
			NotifyProps: model.GetDefaultChannelNotifyProps(),
		})
		require.Nil(t, err)
		channel2, nErr := ss.Channel().Save(&model.Channel{
			Name:        "channel2",
			DisplayName: "ABC",
			TeamId:      teamId,
			Type:        model.CHANNEL_OPEN,
		}, 10)
		require.Nil(t, nErr)
		_, err = ss.Channel().SaveMember(&model.ChannelMember{
			UserId:      userId,
			ChannelId:   channel2.Id,
			NotifyProps: model.GetDefaultChannelNotifyProps(),
		})
		require.Nil(t, err)
		// And assign one to another category
		_, err = ss.Channel().UpdateSidebarCategories(userId, teamId, []*model.SidebarCategoryWithChannels{
			{
				SidebarCategory: favoritesCategory.SidebarCategory,
				Channels:        []string{channel2.Id},
			},
		})
		require.Nil(t, err)
		// Ensure that the correct channel is returned in the Channels category
		res, err := ss.Channel().GetSidebarCategory(channelsCategory.Id)
		assert.Nil(t, err)
		assert.Equal(t, channelsCategory.Id, res.Id)
		assert.Equal(t, model.SidebarCategoryChannels, res.Type)
		assert.Equal(t, []string{channel1.Id}, res.Channels)
	})
	t.Run("should return any orphaned DM channels with the Direct Messages category", func(t *testing.T) {
		userId := model.NewId()
		teamId := model.NewId()
		// Create the initial categories and find the DMs category
		nErr := ss.Channel().CreateInitialSidebarCategories(userId, teamId)
		require.Nil(t, nErr)
		categories, err := ss.Channel().GetSidebarCategories(userId, teamId)
		require.Nil(t, err)
		require.Equal(t, model.SidebarCategoryDirectMessages, categories.Categories[2].Type)
		dmsCategory := categories.Categories[2]
		// Create a DM
		otherUserId := model.NewId()
		dmChannel, nErr := ss.Channel().SaveDirectChannel(
			&model.Channel{
				Name: model.GetDMNameFromIds(userId, otherUserId),
				Type: model.CHANNEL_DIRECT,
			},
			&model.ChannelMember{
				UserId:      userId,
				NotifyProps: model.GetDefaultChannelNotifyProps(),
			},
			&model.ChannelMember{
				UserId:      otherUserId,
				NotifyProps: model.GetDefaultChannelNotifyProps(),
			},
		)
		require.Nil(t, nErr)
		// Ensure that the DM is returned
		res, err := ss.Channel().GetSidebarCategory(dmsCategory.Id)
		assert.Nil(t, err)
		assert.Equal(t, dmsCategory.Id, res.Id)
		assert.Equal(t, model.SidebarCategoryDirectMessages, res.Type)
		assert.Equal(t, []string{dmChannel.Id}, res.Channels)
	})
	t.Run("should return any orphaned GM channels with the Direct Messages category", func(t *testing.T) {
		userId := model.NewId()
		teamId := model.NewId()
		// Create the initial categories and find the DMs category
		nErr := ss.Channel().CreateInitialSidebarCategories(userId, teamId)
		require.Nil(t, nErr)
		categories, err := ss.Channel().GetSidebarCategories(userId, teamId)
		require.Nil(t, err)
		require.Equal(t, model.SidebarCategoryDirectMessages, categories.Categories[2].Type)
		dmsCategory := categories.Categories[2]
		// Create a GM (group channels have no team)
		gmChannel, nErr := ss.Channel().Save(&model.Channel{
			Name:   "abc",
			TeamId: "",
			Type:   model.CHANNEL_GROUP,
		}, 10)
		require.Nil(t, nErr)
		_, err = ss.Channel().SaveMember(&model.ChannelMember{
			UserId:      userId,
			ChannelId:   gmChannel.Id,
			NotifyProps: model.GetDefaultChannelNotifyProps(),
		})
		require.Nil(t, err)
		// Ensure that the GM is returned
		res, err := ss.Channel().GetSidebarCategory(dmsCategory.Id)
		assert.Nil(t, err)
		assert.Equal(t, dmsCategory.Id, res.Id)
		assert.Equal(t, model.SidebarCategoryDirectMessages, res.Type)
		assert.Equal(t, []string{gmChannel.Id}, res.Channels)
	})
	t.Run("should return orphaned DM channels in the DMs category which are in a custom category on another team", func(t *testing.T) {
		userId := model.NewId()
		teamId := model.NewId()
		// Create the initial categories and find the DMs category
		nErr := ss.Channel().CreateInitialSidebarCategories(userId, teamId)
		require.Nil(t, nErr)
		categories, err := ss.Channel().GetSidebarCategories(userId, teamId)
		require.Nil(t, err)
		require.Equal(t, model.SidebarCategoryDirectMessages, categories.Categories[2].Type)
		dmsCategory := categories.Categories[2]
		// Create a DM
		otherUserId := model.NewId()
		dmChannel, nErr := ss.Channel().SaveDirectChannel(
			&model.Channel{
				Name: model.GetDMNameFromIds(userId, otherUserId),
				Type: model.CHANNEL_DIRECT,
			},
			&model.ChannelMember{
				UserId:      userId,
				NotifyProps: model.GetDefaultChannelNotifyProps(),
			},
			&model.ChannelMember{
				UserId:      otherUserId,
				NotifyProps: model.GetDefaultChannelNotifyProps(),
			},
		)
		require.Nil(t, nErr)
		// Create another team and assign the DM to a custom category on that team
		otherTeamId := model.NewId()
		nErr = ss.Channel().CreateInitialSidebarCategories(userId, otherTeamId)
		require.Nil(t, nErr)
		_, err = ss.Channel().CreateSidebarCategory(userId, otherTeamId, &model.SidebarCategoryWithChannels{
			SidebarCategory: model.SidebarCategory{
				UserId: userId,
				// NOTE(review): this passes the original teamId even though the
				// category is created on otherTeamId; the store presumably
				// ignores the struct's TeamId in favor of its argument — confirm.
				TeamId: teamId,
			},
			Channels: []string{dmChannel.Id},
		})
		require.Nil(t, err)
		// Ensure that the DM is returned with the DMs category on the original team
		res, err := ss.Channel().GetSidebarCategory(dmsCategory.Id)
		assert.Nil(t, err)
		assert.Equal(t, dmsCategory.Id, res.Id)
		assert.Equal(t, model.SidebarCategoryDirectMessages, res.Type)
		assert.Equal(t, []string{dmChannel.Id}, res.Channels)
	})
}
// testGetSidebarCategories verifies that a category's channel list comes back
// in the same order no matter which read path is used: the value returned by
// CreateSidebarCategory, GetSidebarCategory, and GetSidebarCategories must
// all agree.
func testGetSidebarCategories(t *testing.T, ss store.Store) {
	t.Run("should return channels in the same order between different ways of getting categories", func(t *testing.T) {
		userId := model.NewId()
		teamId := model.NewId()
		nErr := ss.Channel().CreateInitialSidebarCategories(userId, teamId)
		require.Nil(t, nErr)
		expectedChannelIds := []string{model.NewId(), model.NewId(), model.NewId()}
		// Create a custom category holding the three channels.
		createdCategory, err := ss.Channel().CreateSidebarCategory(userId, teamId, &model.SidebarCategoryWithChannels{
			Channels: expectedChannelIds,
		})
		require.Nil(t, err)
		require.NotNil(t, createdCategory)
		// Read it back individually...
		fetchedCategory, err := ss.Channel().GetSidebarCategory(createdCategory.Id)
		require.Nil(t, err)
		// ...and as part of the full category list (new custom categories
		// are placed second, after Favorites).
		allCategories, err := ss.Channel().GetSidebarCategories(userId, teamId)
		require.Nil(t, err)
		require.Len(t, allCategories.Categories, 4)
		require.Equal(t, model.SidebarCategoryCustom, allCategories.Categories[1].Type)
		// This looks unnecessary, but I was getting different results from some of these before
		assert.Equal(t, createdCategory.Channels, allCategories.Categories[1].Channels)
		assert.Equal(t, fetchedCategory.Channels, allCategories.Categories[1].Channels)
		assert.Equal(t, expectedChannelIds, allCategories.Categories[1].Channels)
	})
}
func testUpdateSidebarCategories(t *testing.T, ss store.Store, s SqlSupplier) {
t.Run("ensure the query to update SidebarCategories hasn't been polluted by UpdateSidebarCategoryOrder", func(t *testing.T) {
userId := model.NewId()
teamId := model.NewId()
// Create the initial categories
err := ss.Channel().CreateInitialSidebarCategories(userId, teamId)
require.Nil(t, err)
initialCategories, err := ss.Channel().GetSidebarCategories(userId, teamId)
require.Nil(t, err)
favoritesCategory := initialCategories.Categories[0]
channelsCategory := initialCategories.Categories[1]
dmsCategory := initialCategories.Categories[2]
// And then update one of them
updated, err := ss.Channel().UpdateSidebarCategories(userId, teamId, []*model.SidebarCategoryWithChannels{
channelsCategory,
})
require.Nil(t, err)
assert.Equal(t, channelsCategory, updated[0])
assert.Equal(t, "Channels", updated[0].DisplayName)
// And then reorder the categories
err = ss.Channel().UpdateSidebarCategoryOrder(userId, teamId, []string{dmsCategory.Id, favoritesCategory.Id, channelsCategory.Id})
require.Nil(t, err)
// Which somehow blanks out stuff because ???
got, err := ss.Channel().GetSidebarCategory(favoritesCategory.Id)
require.Nil(t, err)
assert.Equal(t, "Favorites", got.DisplayName)
})
t.Run("categories should be returned in their original order", func(t *testing.T) {
userId := model.NewId()
teamId := model.NewId()
// Create the initial categories
err := ss.Channel().CreateInitialSidebarCategories(userId, teamId)
require.Nil(t, err)
initialCategories, err := ss.Channel().GetSidebarCategories(userId, teamId)
require.Nil(t, err)
favoritesCategory := initialCategories.Categories[0]
channelsCategory := initialCategories.Categories[1]
dmsCategory := initialCategories.Categories[2]
// And then update them
updatedCategories, err := ss.Channel().UpdateSidebarCategories(userId, teamId, []*model.SidebarCategoryWithChannels{
favoritesCategory,
channelsCategory,
dmsCategory,
})
assert.Nil(t, err)
assert.Equal(t, favoritesCategory.Id, updatedCategories[0].Id)
assert.Equal(t, channelsCategory.Id, updatedCategories[1].Id)
assert.Equal(t, dmsCategory.Id, updatedCategories[2].Id)
})
t.Run("should silently fail to update read only fields", func(t *testing.T) {
userId := model.NewId()
teamId := model.NewId()
nErr := ss.Channel().CreateInitialSidebarCategories(userId, teamId)
require.Nil(t, nErr)
initialCategories, err := ss.Channel().GetSidebarCategories(userId, teamId)
require.Nil(t, err)
favoritesCategory := initialCategories.Categories[0]
channelsCategory := initialCategories.Categories[1]
dmsCategory := initialCategories.Categories[2]
customCategory, err := ss.Channel().CreateSidebarCategory(userId, teamId, &model.SidebarCategoryWithChannels{})
require.Nil(t, err)
categoriesToUpdate := []*model.SidebarCategoryWithChannels{
// Try to change the type of Favorites
{
SidebarCategory: model.SidebarCategory{
Id: favoritesCategory.Id,
DisplayName: "something else",
},
Channels: favoritesCategory.Channels,
},
// Try to change the type of Channels
{
SidebarCategory: model.SidebarCategory{
Id: channelsCategory.Id,
Type: model.SidebarCategoryDirectMessages,
},
Channels: channelsCategory.Channels,
},
// Try to change the Channels of DMs
{
SidebarCategory: dmsCategory.SidebarCategory,
Channels: []string{"fakechannel"},
},
// Try to change the UserId/TeamId of a custom category
{
SidebarCategory: model.SidebarCategory{
Id: customCategory.Id,
UserId: model.NewId(),
TeamId: model.NewId(),
Sorting: customCategory.Sorting,
DisplayName: customCategory.DisplayName,
},
Channels: customCategory.Channels,
},
}
updatedCategories, err := ss.Channel().UpdateSidebarCategories(userId, teamId, categoriesToUpdate)
assert.Nil(t, err)
assert.NotEqual(t, "Favorites", categoriesToUpdate[0].DisplayName)
assert.Equal(t, "Favorites", updatedCategories[0].DisplayName)
assert.NotEqual(t, model.SidebarCategoryChannels, categoriesToUpdate[1].Type)
assert.Equal(t, model.SidebarCategoryChannels, updatedCategories[1].Type)
assert.NotEqual(t, []string{}, categoriesToUpdate[2].Channels)
assert.Equal(t, []string{}, updatedCategories[2].Channels)
assert.NotEqual(t, userId, categoriesToUpdate[3].UserId)
assert.Equal(t, userId, updatedCategories[3].UserId)
})
t.Run("should add and remove favorites preferences based on the Favorites category", func(t *testing.T) {
userId := model.NewId()
teamId := model.NewId()
// Create the initial categories and find the favorites category
nErr := ss.Channel().CreateInitialSidebarCategories(userId, teamId)
require.Nil(t, nErr)
categories, err := ss.Channel().GetSidebarCategories(userId, teamId)
require.Nil(t, err)
favoritesCategory := categories.Categories[0]
require.Equal(t, model.SidebarCategoryFavorites, favoritesCategory.Type)
// Join a channel
channel, nErr := ss.Channel().Save(&model.Channel{
Name: "channel",
Type: model.CHANNEL_OPEN,
TeamId: teamId,
}, 10)
require.Nil(t, nErr)
_, err = ss.Channel().SaveMember(&model.ChannelMember{
UserId: userId,
ChannelId: channel.Id,
NotifyProps: model.GetDefaultChannelNotifyProps(),
})
require.Nil(t, err)
// Assign it to favorites
_, err = ss.Channel().UpdateSidebarCategories(userId, teamId, []*model.SidebarCategoryWithChannels{
{
SidebarCategory: favoritesCategory.SidebarCategory,
Channels: []string{channel.Id},
},
})
assert.Nil(t, err)
res, nErr := ss.Preference().Get(userId, model.PREFERENCE_CATEGORY_FAVORITE_CHANNEL, channel.Id)
assert.Nil(t, nErr)
assert.NotNil(t, res)
assert.Equal(t, "true", res.Value)
// And then remove it
channelsCategory := categories.Categories[1]
require.Equal(t, model.SidebarCategoryChannels, channelsCategory.Type)
_, err = ss.Channel().UpdateSidebarCategories(userId, teamId, []*model.SidebarCategoryWithChannels{
{
SidebarCategory: channelsCategory.SidebarCategory,
Channels: []string{channel.Id},
},
})
assert.Nil(t, err)
res, nErr = ss.Preference().Get(userId, model.PREFERENCE_CATEGORY_FAVORITE_CHANNEL, channel.Id)
assert.NotNil(t, nErr)
assert.True(t, errors.Is(nErr, sql.ErrNoRows))
assert.Nil(t, res)
})
t.Run("should add and remove favorites preferences for DMs", func(t *testing.T) {
	userId := model.NewId()
	teamId := model.NewId()

	// Create the initial categories and find the favorites category.
	nErr := ss.Channel().CreateInitialSidebarCategories(userId, teamId)
	require.Nil(t, nErr)

	categories, err := ss.Channel().GetSidebarCategories(userId, teamId)
	require.Nil(t, err)

	favoritesCategory := categories.Categories[0]
	require.Equal(t, model.SidebarCategoryFavorites, favoritesCategory.Type)

	// Create a direct channel with another user.
	otherUserId := model.NewId()
	dmChannel, nErr := ss.Channel().SaveDirectChannel(
		&model.Channel{
			Name: model.GetDMNameFromIds(userId, otherUserId),
			Type: model.CHANNEL_DIRECT,
		},
		&model.ChannelMember{
			UserId:      userId,
			NotifyProps: model.GetDefaultChannelNotifyProps(),
		},
		&model.ChannelMember{
			UserId:      otherUserId,
			NotifyProps: model.GetDefaultChannelNotifyProps(),
		},
	)
	assert.Nil(t, nErr)

	// Assign it to favorites; the favorite-channel preference row should
	// be created.
	_, err = ss.Channel().UpdateSidebarCategories(userId, teamId, []*model.SidebarCategoryWithChannels{
		{
			SidebarCategory: favoritesCategory.SidebarCategory,
			Channels:        []string{dmChannel.Id},
		},
	})
	assert.Nil(t, err)

	res, nErr := ss.Preference().Get(userId, model.PREFERENCE_CATEGORY_FAVORITE_CHANNEL, dmChannel.Id)
	assert.Nil(t, nErr)
	assert.NotNil(t, res)
	assert.Equal(t, "true", res.Value)

	// And then remove it by moving the DM back into the Direct Messages
	// category; the preference row should be deleted again.
	dmsCategory := categories.Categories[2]
	require.Equal(t, model.SidebarCategoryDirectMessages, dmsCategory.Type)

	_, err = ss.Channel().UpdateSidebarCategories(userId, teamId, []*model.SidebarCategoryWithChannels{
		{
			SidebarCategory: dmsCategory.SidebarCategory,
			Channels:        []string{dmChannel.Id},
		},
	})
	assert.Nil(t, err)

	res, nErr = ss.Preference().Get(userId, model.PREFERENCE_CATEGORY_FAVORITE_CHANNEL, dmChannel.Id)
	assert.NotNil(t, nErr)
	assert.True(t, errors.Is(nErr, sql.ErrNoRows))
	assert.Nil(t, res)
})
t.Run("should add and remove favorites preferences, even if the channel is already favorited in preferences", func(t *testing.T) {
	userId := model.NewId()
	teamId := model.NewId()
	teamId2 := model.NewId()

	// Create the initial categories and find the favorites categories in each team.
	nErr := ss.Channel().CreateInitialSidebarCategories(userId, teamId)
	require.Nil(t, nErr)

	categories, err := ss.Channel().GetSidebarCategories(userId, teamId)
	require.Nil(t, err)

	favoritesCategory := categories.Categories[0]
	require.Equal(t, model.SidebarCategoryFavorites, favoritesCategory.Type)

	nErr = ss.Channel().CreateInitialSidebarCategories(userId, teamId2)
	require.Nil(t, nErr)

	categories2, err := ss.Channel().GetSidebarCategories(userId, teamId2)
	require.Nil(t, err)

	favoritesCategory2 := categories2.Categories[0]
	require.Equal(t, model.SidebarCategoryFavorites, favoritesCategory2.Type)

	// Create a direct channel; a DM is not tied to either team.
	otherUserId := model.NewId()
	dmChannel, nErr := ss.Channel().SaveDirectChannel(
		&model.Channel{
			Name: model.GetDMNameFromIds(userId, otherUserId),
			Type: model.CHANNEL_DIRECT,
		},
		&model.ChannelMember{
			UserId:      userId,
			NotifyProps: model.GetDefaultChannelNotifyProps(),
		},
		&model.ChannelMember{
			UserId:      otherUserId,
			NotifyProps: model.GetDefaultChannelNotifyProps(),
		},
	)
	assert.Nil(t, nErr)

	// Assign it to favorites on the first team. The favorites preference gets set for all teams.
	_, err = ss.Channel().UpdateSidebarCategories(userId, teamId, []*model.SidebarCategoryWithChannels{
		{
			SidebarCategory: favoritesCategory.SidebarCategory,
			Channels:        []string{dmChannel.Id},
		},
	})
	assert.Nil(t, err)

	res, nErr := ss.Preference().Get(userId, model.PREFERENCE_CATEGORY_FAVORITE_CHANNEL, dmChannel.Id)
	assert.Nil(t, nErr)
	assert.NotNil(t, res)
	assert.Equal(t, "true", res.Value)

	// Assign it to favorites on the second team. The favorites preference is already set.
	// NOTE: this update must target teamId2, the team that favoritesCategory2
	// belongs to; the original code mistakenly passed teamId here.
	updated, err := ss.Channel().UpdateSidebarCategories(userId, teamId2, []*model.SidebarCategoryWithChannels{
		{
			SidebarCategory: favoritesCategory2.SidebarCategory,
			Channels:        []string{dmChannel.Id},
		},
	})
	assert.Nil(t, err)
	assert.Equal(t, []string{dmChannel.Id}, updated[0].Channels)

	res, nErr = ss.Preference().Get(userId, model.PREFERENCE_CATEGORY_FAVORITE_CHANNEL, dmChannel.Id)
	assert.NoError(t, nErr)
	assert.NotNil(t, res)
	assert.Equal(t, "true", res.Value)

	// Remove it from favorites on the first team. This clears the favorites preference for all teams.
	_, err = ss.Channel().UpdateSidebarCategories(userId, teamId, []*model.SidebarCategoryWithChannels{
		{
			SidebarCategory: favoritesCategory.SidebarCategory,
			Channels:        []string{},
		},
	})
	assert.Nil(t, err)

	res, nErr = ss.Preference().Get(userId, model.PREFERENCE_CATEGORY_FAVORITE_CHANNEL, dmChannel.Id)
	require.Error(t, nErr)
	assert.Nil(t, res)

	// Remove it from favorites on the second team. The favorites preference was already deleted.
	_, err = ss.Channel().UpdateSidebarCategories(userId, teamId2, []*model.SidebarCategoryWithChannels{
		{
			SidebarCategory: favoritesCategory2.SidebarCategory,
			Channels:        []string{},
		},
	})
	assert.Nil(t, err)

	res, nErr = ss.Preference().Get(userId, model.PREFERENCE_CATEGORY_FAVORITE_CHANNEL, dmChannel.Id)
	require.Error(t, nErr)
	assert.Nil(t, res)
})
t.Run("should not affect other users' favorites preferences", func(t *testing.T) {
	userId := model.NewId()
	teamId := model.NewId()

	// Create the initial categories and find the favorites category.
	nErr := ss.Channel().CreateInitialSidebarCategories(userId, teamId)
	require.Nil(t, nErr)

	categories, err := ss.Channel().GetSidebarCategories(userId, teamId)
	require.Nil(t, err)

	favoritesCategory := categories.Categories[0]
	require.Equal(t, model.SidebarCategoryFavorites, favoritesCategory.Type)
	channelsCategory := categories.Categories[1]
	require.Equal(t, model.SidebarCategoryChannels, channelsCategory.Type)

	// Create the other user's categories.
	userId2 := model.NewId()

	nErr = ss.Channel().CreateInitialSidebarCategories(userId2, teamId)
	require.Nil(t, nErr)

	categories2, err := ss.Channel().GetSidebarCategories(userId2, teamId)
	require.Nil(t, err)

	favoritesCategory2 := categories2.Categories[0]
	require.Equal(t, model.SidebarCategoryFavorites, favoritesCategory2.Type)
	channelsCategory2 := categories2.Categories[1]
	require.Equal(t, model.SidebarCategoryChannels, channelsCategory2.Type)

	// Have both users join a channel.
	channel, nErr := ss.Channel().Save(&model.Channel{
		Name:   "channel",
		Type:   model.CHANNEL_OPEN,
		TeamId: teamId,
	}, 10)
	require.Nil(t, nErr)

	_, err = ss.Channel().SaveMember(&model.ChannelMember{
		UserId:      userId,
		ChannelId:   channel.Id,
		NotifyProps: model.GetDefaultChannelNotifyProps(),
	})
	require.Nil(t, err)

	_, err = ss.Channel().SaveMember(&model.ChannelMember{
		UserId:      userId2,
		ChannelId:   channel.Id,
		NotifyProps: model.GetDefaultChannelNotifyProps(),
	})
	require.Nil(t, err)

	// Have user1 favorite it; only user1's preference row should be created.
	_, err = ss.Channel().UpdateSidebarCategories(userId, teamId, []*model.SidebarCategoryWithChannels{
		{
			SidebarCategory: favoritesCategory.SidebarCategory,
			Channels:        []string{channel.Id},
		},
		{
			SidebarCategory: channelsCategory.SidebarCategory,
			Channels:        []string{},
		},
	})
	assert.Nil(t, err)

	res, nErr := ss.Preference().Get(userId, model.PREFERENCE_CATEGORY_FAVORITE_CHANNEL, channel.Id)
	assert.Nil(t, nErr)
	assert.NotNil(t, res)
	assert.Equal(t, "true", res.Value)

	res, nErr = ss.Preference().Get(userId2, model.PREFERENCE_CATEGORY_FAVORITE_CHANNEL, channel.Id)
	assert.True(t, errors.Is(nErr, sql.ErrNoRows))
	assert.Nil(t, res)

	// And user2 favorite it; now both preference rows exist.
	_, err = ss.Channel().UpdateSidebarCategories(userId2, teamId, []*model.SidebarCategoryWithChannels{
		{
			SidebarCategory: favoritesCategory2.SidebarCategory,
			Channels:        []string{channel.Id},
		},
		{
			SidebarCategory: channelsCategory2.SidebarCategory,
			Channels:        []string{},
		},
	})
	assert.Nil(t, err)

	res, nErr = ss.Preference().Get(userId, model.PREFERENCE_CATEGORY_FAVORITE_CHANNEL, channel.Id)
	assert.Nil(t, nErr)
	assert.NotNil(t, res)
	assert.Equal(t, "true", res.Value)

	res, nErr = ss.Preference().Get(userId2, model.PREFERENCE_CATEGORY_FAVORITE_CHANNEL, channel.Id)
	assert.Nil(t, nErr)
	assert.NotNil(t, res)
	assert.Equal(t, "true", res.Value)

	// And then user1 unfavorite it; user2's preference must survive.
	_, err = ss.Channel().UpdateSidebarCategories(userId, teamId, []*model.SidebarCategoryWithChannels{
		{
			SidebarCategory: channelsCategory.SidebarCategory,
			Channels:        []string{channel.Id},
		},
		{
			SidebarCategory: favoritesCategory.SidebarCategory,
			Channels:        []string{},
		},
	})
	assert.Nil(t, err)

	res, nErr = ss.Preference().Get(userId, model.PREFERENCE_CATEGORY_FAVORITE_CHANNEL, channel.Id)
	assert.True(t, errors.Is(nErr, sql.ErrNoRows))
	assert.Nil(t, res)

	res, nErr = ss.Preference().Get(userId2, model.PREFERENCE_CATEGORY_FAVORITE_CHANNEL, channel.Id)
	assert.Nil(t, nErr)
	assert.NotNil(t, res)
	assert.Equal(t, "true", res.Value)

	// And finally user2 unfavorites it as well; both preference rows are now gone.
	_, err = ss.Channel().UpdateSidebarCategories(userId2, teamId, []*model.SidebarCategoryWithChannels{
		{
			SidebarCategory: channelsCategory2.SidebarCategory,
			Channels:        []string{channel.Id},
		},
		{
			SidebarCategory: favoritesCategory2.SidebarCategory,
			Channels:        []string{},
		},
	})
	assert.Nil(t, err)

	res, nErr = ss.Preference().Get(userId, model.PREFERENCE_CATEGORY_FAVORITE_CHANNEL, channel.Id)
	assert.True(t, errors.Is(nErr, sql.ErrNoRows))
	assert.Nil(t, res)

	res, nErr = ss.Preference().Get(userId2, model.PREFERENCE_CATEGORY_FAVORITE_CHANNEL, channel.Id)
	assert.True(t, errors.Is(nErr, sql.ErrNoRows))
	assert.Nil(t, res)
})
t.Run("channels removed from Channels or DMs categories should be re-added", func(t *testing.T) {
	userId := model.NewId()
	teamId := model.NewId()

	// Create some channels: a public channel the user is a member of...
	channel, nErr := ss.Channel().Save(&model.Channel{
		Name:   "channel",
		Type:   model.CHANNEL_OPEN,
		TeamId: teamId,
	}, 10)
	require.Nil(t, nErr)

	_, err := ss.Channel().SaveMember(&model.ChannelMember{
		UserId:      userId,
		ChannelId:   channel.Id,
		NotifyProps: model.GetDefaultChannelNotifyProps(),
	})
	require.Nil(t, err)

	// ...and a direct channel with another user.
	otherUserId := model.NewId()
	dmChannel, nErr := ss.Channel().SaveDirectChannel(
		&model.Channel{
			Name: model.GetDMNameFromIds(userId, otherUserId),
			Type: model.CHANNEL_DIRECT,
		},
		&model.ChannelMember{
			UserId:      userId,
			NotifyProps: model.GetDefaultChannelNotifyProps(),
		},
		&model.ChannelMember{
			UserId:      otherUserId,
			NotifyProps: model.GetDefaultChannelNotifyProps(),
		},
	)
	require.Nil(t, nErr)

	nErr = ss.Channel().CreateInitialSidebarCategories(userId, teamId)
	require.Nil(t, nErr)

	// And some categories; the channels start out in Channels and DMs.
	initialCategories, err := ss.Channel().GetSidebarCategories(userId, teamId)
	require.Nil(t, err)

	channelsCategory := initialCategories.Categories[1]
	dmsCategory := initialCategories.Categories[2]
	require.Equal(t, []string{channel.Id}, channelsCategory.Channels)
	require.Equal(t, []string{dmChannel.Id}, dmsCategory.Channels)

	// Try to save the categories with no channels in them.
	categoriesToUpdate := []*model.SidebarCategoryWithChannels{
		{
			SidebarCategory: channelsCategory.SidebarCategory,
			Channels:        []string{},
		},
		{
			SidebarCategory: dmsCategory.SidebarCategory,
			Channels:        []string{},
		},
	}

	updatedCategories, err := ss.Channel().UpdateSidebarCategories(userId, teamId, categoriesToUpdate)
	assert.Nil(t, err)

	// The channels should still exist in the category because they would otherwise be orphaned.
	assert.Equal(t, []string{channel.Id}, updatedCategories[0].Channels)
	assert.Equal(t, []string{dmChannel.Id}, updatedCategories[1].Channels)
})
t.Run("should be able to move DMs into and out of custom categories", func(t *testing.T) {
	userId := model.NewId()
	teamId := model.NewId()

	// Create a direct channel with another user.
	otherUserId := model.NewId()
	dmChannel, nErr := ss.Channel().SaveDirectChannel(
		&model.Channel{
			Name: model.GetDMNameFromIds(userId, otherUserId),
			Type: model.CHANNEL_DIRECT,
		},
		&model.ChannelMember{
			UserId:      userId,
			NotifyProps: model.GetDefaultChannelNotifyProps(),
		},
		&model.ChannelMember{
			UserId:      otherUserId,
			NotifyProps: model.GetDefaultChannelNotifyProps(),
		},
	)
	require.Nil(t, nErr)

	nErr = ss.Channel().CreateInitialSidebarCategories(userId, teamId)
	require.Nil(t, nErr)

	// The DM should start in the DMs category.
	initialCategories, err := ss.Channel().GetSidebarCategories(userId, teamId)
	require.Nil(t, err)

	dmsCategory := initialCategories.Categories[2]
	require.Equal(t, []string{dmChannel.Id}, dmsCategory.Channels)

	// Now move the DM into a custom category.
	customCategory, err := ss.Channel().CreateSidebarCategory(userId, teamId, &model.SidebarCategoryWithChannels{})
	require.Nil(t, err)

	categoriesToUpdate := []*model.SidebarCategoryWithChannels{
		{
			SidebarCategory: dmsCategory.SidebarCategory,
			Channels:        []string{},
		},
		{
			SidebarCategory: customCategory.SidebarCategory,
			Channels:        []string{dmChannel.Id},
		},
	}

	updatedCategories, err := ss.Channel().UpdateSidebarCategories(userId, teamId, categoriesToUpdate)
	assert.Nil(t, err)
	assert.Equal(t, dmsCategory.Id, updatedCategories[0].Id)
	assert.Equal(t, []string{}, updatedCategories[0].Channels)
	assert.Equal(t, customCategory.Id, updatedCategories[1].Id)
	assert.Equal(t, []string{dmChannel.Id}, updatedCategories[1].Channels)

	// Confirm the move persisted by re-reading both categories.
	updatedDmsCategory, err := ss.Channel().GetSidebarCategory(dmsCategory.Id)
	require.Nil(t, err)
	assert.Equal(t, []string{}, updatedDmsCategory.Channels)

	updatedCustomCategory, err := ss.Channel().GetSidebarCategory(customCategory.Id)
	require.Nil(t, err)
	assert.Equal(t, []string{dmChannel.Id}, updatedCustomCategory.Channels)

	// And move it back out of the custom category.
	categoriesToUpdate = []*model.SidebarCategoryWithChannels{
		{
			SidebarCategory: dmsCategory.SidebarCategory,
			Channels:        []string{dmChannel.Id},
		},
		{
			SidebarCategory: customCategory.SidebarCategory,
			Channels:        []string{},
		},
	}

	updatedCategories, err = ss.Channel().UpdateSidebarCategories(userId, teamId, categoriesToUpdate)
	assert.Nil(t, err)
	assert.Equal(t, dmsCategory.Id, updatedCategories[0].Id)
	assert.Equal(t, []string{dmChannel.Id}, updatedCategories[0].Channels)
	assert.Equal(t, customCategory.Id, updatedCategories[1].Id)
	assert.Equal(t, []string{}, updatedCategories[1].Channels)

	updatedDmsCategory, err = ss.Channel().GetSidebarCategory(dmsCategory.Id)
	require.Nil(t, err)
	assert.Equal(t, []string{dmChannel.Id}, updatedDmsCategory.Channels)

	updatedCustomCategory, err = ss.Channel().GetSidebarCategory(customCategory.Id)
	require.Nil(t, err)
	assert.Equal(t, []string{}, updatedCustomCategory.Channels)
})
t.Run("should successfully move channels between categories", func(t *testing.T) {
	userId := model.NewId()
	teamId := model.NewId()

	// Join a channel.
	channel, nErr := ss.Channel().Save(&model.Channel{
		Name:   "channel",
		Type:   model.CHANNEL_OPEN,
		TeamId: teamId,
	}, 10)
	require.Nil(t, nErr)

	_, err := ss.Channel().SaveMember(&model.ChannelMember{
		UserId:      userId,
		ChannelId:   channel.Id,
		NotifyProps: model.GetDefaultChannelNotifyProps(),
	})
	require.Nil(t, err)

	// And then create the initial categories so that it includes the channel.
	nErr = ss.Channel().CreateInitialSidebarCategories(userId, teamId)
	require.Nil(t, nErr)

	initialCategories, err := ss.Channel().GetSidebarCategories(userId, teamId)
	require.Nil(t, err)

	channelsCategory := initialCategories.Categories[1]
	require.Equal(t, []string{channel.Id}, channelsCategory.Channels)

	customCategory, err := ss.Channel().CreateSidebarCategory(userId, teamId, &model.SidebarCategoryWithChannels{})
	require.Nil(t, err)

	// Move the channel one way: Channels -> custom.
	updatedCategories, err := ss.Channel().UpdateSidebarCategories(userId, teamId, []*model.SidebarCategoryWithChannels{
		{
			SidebarCategory: channelsCategory.SidebarCategory,
			Channels:        []string{},
		},
		{
			SidebarCategory: customCategory.SidebarCategory,
			Channels:        []string{channel.Id},
		},
	})
	assert.Nil(t, err)

	assert.Equal(t, []string{}, updatedCategories[0].Channels)
	assert.Equal(t, []string{channel.Id}, updatedCategories[1].Channels)

	// And then the other: custom -> Channels.
	updatedCategories, err = ss.Channel().UpdateSidebarCategories(userId, teamId, []*model.SidebarCategoryWithChannels{
		{
			SidebarCategory: channelsCategory.SidebarCategory,
			Channels:        []string{channel.Id},
		},
		{
			SidebarCategory: customCategory.SidebarCategory,
			Channels:        []string{},
		},
	})
	assert.Nil(t, err)

	assert.Equal(t, []string{channel.Id}, updatedCategories[0].Channels)
	assert.Equal(t, []string{}, updatedCategories[1].Channels)
})
}
// testDeleteSidebarCategory verifies that custom sidebar categories can be
// deleted (along with their SidebarChannels rows) while the built-in
// favorites/channels/DMs categories cannot.
func testDeleteSidebarCategory(t *testing.T, ss store.Store, s SqlSupplier) {
	// setupInitialSidebarCategories creates a fresh user/team pair with the
	// three default categories and returns their IDs.
	setupInitialSidebarCategories := func(t *testing.T, ss store.Store) (string, string) {
		userId := model.NewId()
		teamId := model.NewId()

		nErr := ss.Channel().CreateInitialSidebarCategories(userId, teamId)
		require.Nil(t, nErr)

		res, err := ss.Channel().GetSidebarCategories(userId, teamId)
		require.Nil(t, err)
		require.Len(t, res.Categories, 3)

		return userId, teamId
	}

	t.Run("should correctly remove an empty category", func(t *testing.T) {
		userId, teamId := setupInitialSidebarCategories(t, ss)
		defer ss.User().PermanentDelete(userId)

		newCategory, err := ss.Channel().CreateSidebarCategory(userId, teamId, &model.SidebarCategoryWithChannels{})
		require.Nil(t, err)
		require.NotNil(t, newCategory)

		// Ensure that the category was created properly.
		res, err := ss.Channel().GetSidebarCategories(userId, teamId)
		require.Nil(t, err)
		require.Len(t, res.Categories, 4)

		// Then delete it and confirm that was done correctly.
		err = ss.Channel().DeleteSidebarCategory(newCategory.Id)
		assert.Nil(t, err)

		res, err = ss.Channel().GetSidebarCategories(userId, teamId)
		require.Nil(t, err)
		require.Len(t, res.Categories, 3)
	})

	t.Run("should correctly remove a category and its channels", func(t *testing.T) {
		userId, teamId := setupInitialSidebarCategories(t, ss)
		defer ss.User().PermanentDelete(userId)

		user := &model.User{
			Id: userId,
		}

		// Create some channels: public, private, and a DM.
		channel1, nErr := ss.Channel().Save(&model.Channel{
			Name:   model.NewId(),
			TeamId: teamId,
			Type:   model.CHANNEL_OPEN,
		}, 1000)
		require.Nil(t, nErr)
		defer ss.Channel().PermanentDelete(channel1.Id)

		channel2, nErr := ss.Channel().Save(&model.Channel{
			Name:   model.NewId(),
			TeamId: teamId,
			Type:   model.CHANNEL_PRIVATE,
		}, 1000)
		require.Nil(t, nErr)
		defer ss.Channel().PermanentDelete(channel2.Id)

		dmChannel1, nErr := ss.Channel().CreateDirectChannel(user, &model.User{
			Id: model.NewId(),
		})
		require.Nil(t, nErr)
		defer ss.Channel().PermanentDelete(dmChannel1.Id)

		// Assign some of those channels to a custom category.
		newCategory, err := ss.Channel().CreateSidebarCategory(userId, teamId, &model.SidebarCategoryWithChannels{
			Channels: []string{channel1.Id, channel2.Id, dmChannel1.Id},
		})
		require.Nil(t, err)
		require.NotNil(t, newCategory)

		// Ensure that the categories are set up correctly.
		res, err := ss.Channel().GetSidebarCategories(userId, teamId)
		require.Nil(t, err)
		require.Len(t, res.Categories, 4)
		require.Equal(t, model.SidebarCategoryCustom, res.Categories[1].Type)
		require.Equal(t, []string{channel1.Id, channel2.Id, dmChannel1.Id}, res.Categories[1].Channels)

		// Actually delete the category.
		err = ss.Channel().DeleteSidebarCategory(newCategory.Id)
		assert.Nil(t, err)

		// Confirm that the category was deleted...
		res, err = ss.Channel().GetSidebarCategories(userId, teamId)
		assert.Nil(t, err)
		assert.Len(t, res.Categories, 3)

		// ...and that the corresponding SidebarChannel entries were deleted.
		count, countErr := s.GetMaster().SelectInt(`
			SELECT
				COUNT(*)
			FROM
				SidebarChannels
			WHERE
				CategoryId = :CategoryId`, map[string]interface{}{"CategoryId": newCategory.Id})
		require.Nil(t, countErr)
		assert.Equal(t, int64(0), count)
	})

	t.Run("should not allow you to remove non-custom categories", func(t *testing.T) {
		userId, teamId := setupInitialSidebarCategories(t, ss)
		defer ss.User().PermanentDelete(userId)

		res, err := ss.Channel().GetSidebarCategories(userId, teamId)
		require.Nil(t, err)
		require.Len(t, res.Categories, 3)
		require.Equal(t, model.SidebarCategoryFavorites, res.Categories[0].Type)
		require.Equal(t, model.SidebarCategoryChannels, res.Categories[1].Type)
		require.Equal(t, model.SidebarCategoryDirectMessages, res.Categories[2].Type)

		// Deleting any of the three built-in categories must fail.
		err = ss.Channel().DeleteSidebarCategory(res.Categories[0].Id)
		assert.NotNil(t, err)

		err = ss.Channel().DeleteSidebarCategory(res.Categories[1].Id)
		assert.NotNil(t, err)

		err = ss.Channel().DeleteSidebarCategory(res.Categories[2].Id)
		assert.NotNil(t, err)
	})
}
package recovery
import (
"context"
"encoding/json"
"testing"
"github.com/bxcodec/faker/v3"
"github.com/gofrs/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/ory/kratos/driver/config"
"github.com/ory/kratos/identity"
"github.com/ory/kratos/selfservice/form"
"github.com/ory/kratos/text"
"github.com/ory/kratos/x"
)
type (
	// FlowPersister persists self-service recovery flows.
	FlowPersister interface {
		// CreateRecoveryFlow stores a new recovery flow.
		CreateRecoveryFlow(context.Context, *Flow) error
		// GetRecoveryFlow returns the recovery flow with the given ID.
		GetRecoveryFlow(ctx context.Context, id uuid.UUID) (*Flow, error)
		// UpdateRecoveryFlow updates an existing recovery flow.
		UpdateRecoveryFlow(context.Context, *Flow) error
	}
	// FlowPersistenceProvider exposes a FlowPersister.
	FlowPersistenceProvider interface {
		RecoveryFlowPersister() FlowPersister
	}
)
// TestFlowPersister returns a reusable test suite that exercises a recovery
// FlowPersister implementation (combined with an identity pool): creating,
// fetching, and updating recovery flows, and guarding against data loss on
// no-op updates.
func TestFlowPersister(conf *config.Config, p interface {
	FlowPersister
	identity.PrivilegedPool
}) func(t *testing.T) {
	// clearids resets a faked flow's ID so the persister assigns a fresh one.
	var clearids = func(r *Flow) {
		r.ID = uuid.UUID{}
	}

	ctx := context.Background()

	return func(t *testing.T) {
		conf.MustSet(config.ViperKeyDefaultIdentitySchemaURL, "file://./stub/identity.schema.json")

		t.Run("case=should error when the recovery request does not exist", func(t *testing.T) {
			_, err := p.GetRecoveryFlow(ctx, x.NewUUID())
			require.Error(t, err)
		})

		// newFlow builds a randomized flow with its ID cleared.
		var newFlow = func(t *testing.T) *Flow {
			var r Flow
			require.NoError(t, faker.FakeData(&r))
			clearids(&r)
			return &r
		}

		t.Run("case=should create a new recovery request", func(t *testing.T) {
			r := newFlow(t)
			err := p.CreateRecoveryFlow(ctx, r)
			require.NoError(t, err, "%#v", err)
		})

		t.Run("case=should create with set ids", func(t *testing.T) {
			var r Flow
			require.NoError(t, faker.FakeData(&r))
			require.NoError(t, p.CreateRecoveryFlow(ctx, &r))
		})

		t.Run("case=should create and fetch a recovery request", func(t *testing.T) {
			expected := newFlow(t)
			err := p.CreateRecoveryFlow(ctx, expected)
			require.NoError(t, err)

			actual, err := p.GetRecoveryFlow(ctx, expected.ID)
			require.NoError(t, err)

			// Compare method configs via their JSON encodings.
			fexpected, _ := json.Marshal(expected.Methods[StrategyRecoveryLinkName].Config)
			factual, _ := json.Marshal(actual.Methods[StrategyRecoveryLinkName].Config)

			require.NotEmpty(t, actual.Methods[StrategyRecoveryLinkName].Config.FlowMethodConfigurator.(*form.HTMLForm).Action)
			assert.EqualValues(t, expected.ID, actual.ID)
			assert.JSONEq(t, string(fexpected), string(factual))
			x.AssertEqualTime(t, expected.IssuedAt, actual.IssuedAt)
			x.AssertEqualTime(t, expected.ExpiresAt, actual.ExpiresAt)
			assert.EqualValues(t, expected.RequestURL, actual.RequestURL)
		})

		t.Run("case=should create and update a recovery request", func(t *testing.T) {
			expected := newFlow(t)
			expected.Methods[StrategyRecoveryLinkName] = &FlowMethod{
				Method: StrategyRecoveryLinkName, Config: &FlowMethodConfig{FlowMethodConfigurator: &form.HTMLForm{Fields: []form.Field{{
					Name: "zab", Type: "bar", Pattern: "baz"}}}}}
			expected.Methods["password"] = &FlowMethod{
				Method: "password", Config: &FlowMethodConfig{FlowMethodConfigurator: &form.HTMLForm{Fields: []form.Field{{
					Name: "foo", Type: "bar", Pattern: "baz"}}}}}
			err := p.CreateRecoveryFlow(ctx, expected)
			require.NoError(t, err)

			// Mutate the flow and verify that every change round-trips.
			expected.Methods[StrategyRecoveryLinkName].Config.FlowMethodConfigurator.(*form.HTMLForm).Action = "/new-action"
			expected.Methods["password"].Config.FlowMethodConfigurator.(*form.HTMLForm).Fields = []form.Field{{
				Name: "zab", Type: "zab", Pattern: "zab"}}
			expected.RequestURL = "/new-request-url"
			expected.Active = StrategyRecoveryLinkName
			expected.Messages.Add(text.NewRecoveryEmailSent())
			require.NoError(t, p.UpdateRecoveryFlow(ctx, expected))

			actual, err := p.GetRecoveryFlow(ctx, expected.ID)
			require.NoError(t, err)
			assert.Equal(t, "/new-action", actual.Methods[StrategyRecoveryLinkName].Config.FlowMethodConfigurator.(*form.HTMLForm).Action)
			assert.Equal(t, "/new-request-url", actual.RequestURL)
			assert.Equal(t, StrategyRecoveryLinkName, actual.Active.String())
			assert.Equal(t, expected.Messages, actual.Messages)
			assert.EqualValues(t, []form.Field{{Name: "zab", Type: "zab", Pattern: "zab"}}, actual.
				Methods["password"].Config.FlowMethodConfigurator.(*form.HTMLForm).Fields)
			assert.EqualValues(t, []form.Field{{Name: "zab", Type: "bar", Pattern: "baz"}}, actual.
				Methods[StrategyRecoveryLinkName].Config.FlowMethodConfigurator.(*form.HTMLForm).Fields)
		})

		t.Run("case=should not cause data loss when updating a request without changes", func(t *testing.T) {
			expected := newFlow(t)
			err := p.CreateRecoveryFlow(ctx, expected)
			require.NoError(t, err)

			actual, err := p.GetRecoveryFlow(ctx, expected.ID)
			require.NoError(t, err)
			assert.Len(t, actual.Methods, 1)

			// Updating with no modifications must not drop methods.
			require.NoError(t, p.UpdateRecoveryFlow(ctx, actual))

			actual, err = p.GetRecoveryFlow(ctx, expected.ID)
			require.NoError(t, err)
			require.Len(t, actual.Methods, 1)

			js, _ := json.Marshal(actual.Methods)
			assert.Equal(t, expected.Methods[StrategyRecoveryLinkName].Config.FlowMethodConfigurator.(*form.HTMLForm).Action,
				actual.Methods[StrategyRecoveryLinkName].Config.FlowMethodConfigurator.(*form.HTMLForm).Action, "%s", js)
		})
	}
}
package k_closest_points_to_origin
import (
"math"
"sort"
)
// KClosest returns the k points closest to the origin, ordered by a
// binary search over the squared Euclidean distance: each round splits
// the remaining candidates around a midpoint distance, keeping whole
// groups that are guaranteed to belong to the answer. Ties at the
// cut-off distance are resolved arbitrarily, which the problem permits.
func KClosest(points [][]int, k int) [][]int {
	// Precompute the squared distance for each point, establish the
	// initial binary-search range, and build a list of candidate indices.
	distances := make([]float64, len(points))
	low, high := 0.0, 0.0
	remaining := make([]int, 0, len(points))
	for i := range points {
		distances[i] = float64(euclideanDistance(points[i]))
		high = math.Max(high, distances[i])
		remaining = append(remaining, i)
	}

	// Binary-search the distance range for the k closest points.
	closest := make([]int, 0, k)
	for k > 0 {
		// All squared distances are integers, so once the window is
		// narrower than 1 every remaining candidate shares a single
		// distance value. Any k of them are valid answers; taking the
		// first k also fixes the infinite loop the previous version hit
		// when k fell strictly inside a group of tied distances.
		if high-low < 0.5 {
			closest = append(closest, remaining[:k]...)
			break
		}

		mid := low + (high-low)/2
		closer, farther := splitDistances(remaining, distances, mid)
		if len(closer) > k {
			// More than k candidates lie within mid: the answer is
			// entirely inside the closer half, so discard the rest.
			remaining = closer
			high = mid
		} else {
			// Everything within mid belongs to the answer; keep searching
			// the farther half for the remaining points.
			k -= len(closer)
			closest = append(closest, closer...)
			remaining = farther
			low = mid
		}
	}

	// Materialize the answer from the candidate indices.
	answer := make([][]int, len(closest))
	for i, index := range closest {
		answer[i] = points[index]
	}
	return answer
}

// splitDistances partitions the candidate indices into those whose
// precomputed distance is at most mid and those farther away.
func splitDistances(remaining []int, distances []float64, mid float64) (closer, farther []int) {
	for _, point := range remaining {
		if distances[point] <= mid {
			closer = append(closer, point)
		} else {
			farther = append(farther, point)
		}
	}
	return closer, farther
}

// kClosest is a simpler O(n log n) reference implementation: sort all
// points by squared distance (in place) and return the first k.
func kClosest(points [][]int, k int) [][]int {
	sort.Slice(points, func(i, j int) bool {
		return euclideanDistance(points[i]) < euclideanDistance(points[j])
	})

	res := make([][]int, 0, k)
	for i := 0; i < k; i++ {
		res = append(res, points[i])
	}
	return res
}

// euclideanDistance returns the squared Euclidean distance from the
// origin; the square root is omitted because it preserves ordering.
func euclideanDistance(point []int) int {
	return point[0]*point[0] + point[1]*point[1]
}
package jit
import (
"io"
"unsafe"
"github.com/damilolarandolph/go-jit/internal/ccall"
)
// Type wraps the low-level ccall.Type, exposing its type operations
// through a higher-level API.
type Type struct {
	*ccall.Type
}

// Types is a slice of wrapped types.
type Types []*Type

// raw unwraps every element into its underlying ccall representation.
func (t Types) raw() ccall.Types {
	types := ccall.Types{}
	for _, tt := range t {
		types = append(types, tt.Type)
	}
	return types
}

// toType wraps a raw ccall.Type in a *Type.
func toType(raw *ccall.Type) *Type {
	return &Type{raw}
}

// CreateStruct builds a struct type from the given field types.
func CreateStruct(fields Types, incref int) *Type {
	return toType(ccall.CreateStruct(fields.raw(), incref))
}

// CreateUnion builds a union type from the given field types.
func CreateUnion(fields Types, incref int) *Type {
	return toType(ccall.CreateUnion(fields.raw(), incref))
}

// CreateSignature builds a function-signature type from the argument
// types and return type.
func CreateSignature(args Types, rtype *Type) *Type {
	return toType(ccall.CreateSignature(args.raw(), rtype.Type))
}

// BestAlignment returns the best alignment value reported by the
// underlying ccall library.
func BestAlignment() uint {
	return ccall.BestAlignment()
}
// The methods below are thin wrappers: each delegates to the embedded
// ccall.Type and re-wraps any *ccall.Type result into a *Type.

// Copy returns a copy of the type produced by the underlying library.
func (t *Type) Copy() *Type {
	return toType(t.Type.Copy())
}

// Free releases the underlying ccall type.
func (t *Type) Free() {
	t.Type.Free()
}

// CreatePointer returns a pointer type referring to t.
func (t *Type) CreatePointer(incref int) *Type {
	return toType(t.Type.CreatePointer(incref))
}

// SetSizeAndAlignment sets the size and alignment of the type.
func (t *Type) SetSizeAndAlignment(size, alignment int) {
	t.Type.SetSizeAndAlignment(size, alignment)
}

// SetOffset sets the offset of the field at fieldIndex.
func (t *Type) SetOffset(fieldIndex, offset uint) {
	t.Type.SetOffset(fieldIndex, offset)
}

// Kind returns the kind code of the type.
func (t *Type) Kind() int {
	return t.Type.Kind()
}

// Size returns the size of the type.
func (t *Type) Size() uint {
	return t.Type.Size()
}

// Alignment returns the alignment of the type.
func (t *Type) Alignment() uint {
	return t.Type.Alignment()
}

// NumFields returns the number of fields of the type.
func (t *Type) NumFields() uint {
	return t.Type.NumFields()
}

// Field returns the wrapped type of the field at index.
func (t *Type) Field(index uint) *Type {
	return toType(t.Type.Field(index))
}

// Offset returns the offset of the field at index.
func (t *Type) Offset(index uint) uint {
	return t.Type.Offset(index)
}

// Name returns the name of the field at index.
func (t *Type) Name(index uint) string {
	return t.Type.Name(index)
}

// FindName returns the index of the field with the given name.
func (t *Type) FindName(name string) uint {
	return t.Type.FindName(name)
}

// NumParams returns the number of parameters of a signature type.
func (t *Type) NumParams() uint {
	return t.Type.NumParams()
}

// Return returns the wrapped return type of a signature type.
func (t *Type) Return() *Type {
	return toType(t.Type.Return())
}

// Param returns the wrapped type of the parameter at index.
func (t *Type) Param(index uint) *Type {
	return toType(t.Type.Param(index))
}

// Ref returns the wrapped type referenced by this type.
func (t *Type) Ref() *Type {
	return toType(t.Type.Ref())
}

// TaggedType returns the wrapped type underlying a tagged type.
func (t *Type) TaggedType() *Type {
	return toType(t.Type.TaggedType())
}

// SetTaggedType sets the underlying type of a tagged type.
func (t *Type) SetTaggedType(underlying *Type, incref int) {
	t.Type.SetTaggedType(underlying.Type, incref)
}

// TaggedKind returns the tag kind code of the type.
func (t *Type) TaggedKind() int {
	return t.Type.TaggedKind()
}

// TaggedData returns the opaque data attached to a tagged type.
func (t *Type) TaggedData() unsafe.Pointer {
	return t.Type.TaggedData()
}

// IsPrimitive reports whether the type is primitive.
func (t *Type) IsPrimitive() bool {
	return t.Type.IsPrimitive()
}

// IsStruct reports whether the type is a struct.
func (t *Type) IsStruct() bool {
	return t.Type.IsStruct()
}

// IsUnion reports whether the type is a union.
func (t *Type) IsUnion() bool {
	return t.Type.IsUnion()
}

// IsSignature reports whether the type is a function signature.
func (t *Type) IsSignature() bool {
	return t.Type.IsSignature()
}

// IsPointer reports whether the type is a pointer.
func (t *Type) IsPointer() bool {
	return t.Type.IsPointer()
}

// IsTagged reports whether the type is tagged.
func (t *Type) IsTagged() bool {
	return t.Type.IsTagged()
}

// RemoveTags returns the wrapped type with tags removed.
func (t *Type) RemoveTags() *Type {
	return toType(t.Type.RemoveTags())
}

// Normalize returns the wrapped normalized form of the type.
func (t *Type) Normalize() *Type {
	return toType(t.Type.Normalize())
}

// PromoteInt returns the wrapped integer-promoted form of the type.
func (t *Type) PromoteInt() *Type {
	return toType(t.Type.PromoteInt())
}

// ReturnViaPointer returns the underlying library's integer code
// indicating whether values of this type are returned via a pointer.
func (t *Type) ReturnViaPointer() int {
	return t.Type.ReturnViaPointer()
}

// HasTag reports whether the type carries a tag of the given kind.
func (t *Type) HasTag(kind int) bool {
	return t.Type.HasTag(kind)
}

// Dump writes a textual representation of the type to w.
func (t *Type) Dump(w io.Writer) error {
	return t.Type.Dump(w)
}
package main
import (
"fmt"
"io/ioutil"
"os"
"strings"
"github.com/Ullaakut/aoc19/pkg/aocutils"
"github.com/Ullaakut/disgo"
"github.com/Ullaakut/disgo/style"
"github.com/fatih/color"
)
// format maps grid runes to the color function used when rendering them:
// 'o' (the central port) in red, 'X' (intersections) in green, and wire
// segments ('+', '-', '|') in white.
var format = map[rune]func(string, ...interface{}) string{
	'o': color.RedString,
	'X': color.GreenString,
	'+': color.WhiteString,
	'-': color.WhiteString,
	'|': color.WhiteString,
}
// Wire describes one wire of the circuit: its index in the input and
// every grid position it travels through, in order from the central port.
type Wire struct {
	ID        int
	Positions []aocutils.Vector2D
}
// main reads the puzzle input from ../input.txt, solves it, and prints
// the resulting intersection distance before exiting.
func main() {
	disgo.StartStep("Reading input file")

	content, err := ioutil.ReadFile("../input.txt")
	if err != nil {
		_ = disgo.FailStepf("unable to read file ../input.txt: %w", err)
		os.Exit(1)
	}
	disgo.EndStep()

	result := solve(strings.TrimSpace(string(content)))
	disgo.Infoln(style.Success(style.SymbolCheck, " Closest intersection to central port:"), result)

	os.Exit(0)
}
// solve parses the wire paths (one comma-separated path per line), draws
// them onto a sparse grid with intersections marked 'X', renders the
// grid, and returns the smallest combined wire distance among the
// intersection points.
func solve(content string) int {
	grid := NewGrid(format)

	// Set central port.
	grid.g[aocutils.NewVector2D(0, 0)] = Cell{-1, 'o'}

	var xMax, xMin, yMax, yMin int
	var wires []Wire
	for wireID, wirePath := range strings.Split(content, "\n") {
		var positions []aocutils.Vector2D
		var circuitPosition aocutils.Vector2D
		for _, pathPart := range strings.Split(wirePath, ",") {
			// Compute which direction to go towards and which
			// character to use to print it in the grid, depending
			// on the direction of the instruction.
			direction, r := computeDirection(rune(pathPart[0]))

			// If we are changing directions, set a + at the previous position
			// before moving the circuit further.
			if !circuitPosition.IsUnset() {
				grid.g[circuitPosition] = Cell{wireID, '+'}
			}

			// Get distance to travel in direction.
			distance := aocutils.Atoi(pathPart[1:])

			// Iterate on each cell to travel through.
			for i := 0; i < distance; i++ {
				// Get cell position from current circuit position + direction
				// multiplied by the iteration number.
				cellPosition := circuitPosition.Add(direction.Mul(i))
				if cellPosition.IsUnset() {
					continue
				}

				positions = append(positions, cellPosition)

				// Set boundaries to be able to render the grid later on.
				xMax, xMin, yMax, yMin = checkLimits(cellPosition, xMax, xMin, yMax, yMin)

				// There's already another circuit going through here!
				cell, exists := grid.g[cellPosition]
				if exists && cell.wireID != wireID {
					grid.g[cellPosition] = Cell{wireID, 'X'}
					continue
				}

				// Set the cell in the grid with the right rune depending on
				// which direction the wire is going.
				grid.g[cellPosition] = Cell{wireID, r}
			}

			// Update the circuit position.
			circuitPosition = circuitPosition.Add(direction.Mul(distance))
		}

		wires = append(wires, Wire{
			ID:        wireID,
			Positions: positions,
		})
	}

	// Display the grid.
	// It is recommended to disable it for large inputs.
	grid.DisplaySquare(xMax, xMin, yMax, yMin)

	// Find the closest intersection to the origin point.
	return findClosestIntersection(wires, grid)
}
// findClosestIntersection looks through the wires, and stores the
// positions where they intersect and their distance relative to
// the beginning of the wire. It then returns the intersection with
// the smallest relative distance.
//
// The value accumulated per intersection is the combined number of steps
// every wire takes to reach that cell (dist is zero-based, hence the +1).
// NOTE(review): a wire that crosses the same intersection twice adds its
// steps twice — confirm that is intended for this puzzle's rules.
func findClosestIntersection(wires []Wire, grid Grid) int {
    intersections := make(map[aocutils.Vector2D]int)
    for _, wire := range wires {
        for dist, pos := range wire.Positions {
            if grid.Cell(pos) == 'X' {
                intersections[pos] = intersections[pos] + dist + 1
            }
        }
    }
    var closestIntersection int
    for _, distance := range intersections {
        // The first intersection seen seeds the minimum (distances are
        // always positive, so 0 doubles as "unset").
        if closestIntersection == 0 {
            closestIntersection = distance
        }
        fmt.Println("Intersection found with distance", distance)
        closestIntersection = aocutils.MinInt(closestIntersection, distance)
    }
    return closestIntersection
}
// computeDirection translates a path instruction letter into the unit
// vector to travel along and the rune used to draw that segment on the
// grid. Unrecognized letters yield a zero vector and the '$' marker.
func computeDirection(dirRune rune) (aocutils.Vector2D, rune) {
    var dx, dy int
    segment := '$'
    switch dirRune {
    case 'R':
        dx, segment = 1, '-'
    case 'L':
        dx, segment = -1, '-'
    case 'U':
        dy, segment = 1, '|'
    case 'D':
        dy, segment = -1, '|'
    }
    return aocutils.NewVector2D(dx, dy), segment
}
func checkLimits(pos aocutils.Vector2D, xMax, xMin, yMax, yMin int) (int, int, int, int) {
xMax = aocutils.MaxInt(xMax, pos.X())
xMin = aocutils.MinInt(xMin, pos.X())
yMax = aocutils.MaxInt(yMax, pos.Y())
yMin = aocutils.MinInt(yMin, pos.Y())
return xMax, xMin, yMax, yMin
} | Day03/Part2/main.go | 0.583797 | 0.427337 | main.go | starcoder |
package models
import (
i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91 "github.com/microsoft/kiota-abstractions-go/serialization"
)
// AccessReviewScheduleSettings models the schedule settings of a Microsoft
// Graph access review.
// NOTE(review): this file looks machine-generated (Kiota-style serialization
// model); manual edits may be overwritten by regeneration.
type AccessReviewScheduleSettings struct {
    // Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
    additionalData map[string]interface{}
    // Optional field. Describes the actions to take once a review is complete. There are two types that are currently supported: removeAccessApplyAction (default) and disableAndDeleteUserApplyAction. Field only needs to be specified in the case of disableAndDeleteUserApplyAction.
    applyActions []AccessReviewApplyActionable
    // Indicates whether decisions are automatically applied. When set to false, an admin must apply the decisions manually once the reviewer completes the access review. When set to true, decisions are applied automatically after the access review instance duration ends, whether or not the reviewers have responded. Default value is false.
    autoApplyDecisionsEnabled *bool
    // Decision chosen if defaultDecisionEnabled is enabled. Can be one of Approve, Deny, or Recommendation.
    defaultDecision *string
    // Indicates whether the default decision is enabled or disabled when reviewers do not respond. Default value is false.
    defaultDecisionEnabled *bool
    // Duration of each recurrence of review (accessReviewInstance) in number of days. NOTE: If the stageSettings of the accessReviewScheduleDefinition object is defined, its durationInDays setting will be used instead of the value of this property.
    instanceDurationInDays *int32
    // Indicates whether reviewers are required to provide justification with their decision. Default value is false.
    justificationRequiredOnApproval *bool
    // Indicates whether emails are enabled or disabled. Default value is false.
    mailNotificationsEnabled *bool
    // Indicates whether decision recommendations are enabled or disabled. NOTE: If the stageSettings of the accessReviewScheduleDefinition object is defined, its recommendationsEnabled setting will be used instead of the value of this property.
    recommendationsEnabled *bool
    // Detailed settings for recurrence using the standard Outlook recurrence object. Note: Only dayOfMonth, interval, and type (weekly, absoluteMonthly) properties are supported. Use the property startDate on recurrenceRange to determine the day the review starts.
    recurrence PatternedRecurrenceable
    // Indicates whether reminders are enabled or disabled. Default value is false.
    reminderNotificationsEnabled *bool
}
// NewAccessReviewScheduleSettings instantiates a new accessReviewScheduleSettings and sets the default values.
func NewAccessReviewScheduleSettings()(*AccessReviewScheduleSettings) {
    m := &AccessReviewScheduleSettings{
    }
    // Start with an empty map so unknown fields can be collected during parse.
    m.SetAdditionalData(make(map[string]interface{}));
    return m
}
// CreateAccessReviewScheduleSettingsFromDiscriminatorValue creates a new instance of the appropriate class based on discriminator value.
// The discriminator is unused here: there is a single concrete type.
func CreateAccessReviewScheduleSettingsFromDiscriminatorValue(parseNode i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, error) {
    return NewAccessReviewScheduleSettings(), nil
}
// GetAdditionalData gets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *AccessReviewScheduleSettings) GetAdditionalData()(map[string]interface{}) {
    // All accessors tolerate a nil receiver and return the zero value.
    if m == nil {
        return nil
    } else {
        return m.additionalData
    }
}
// GetApplyActions gets the applyActions property value. Optional field. Describes the actions to take once a review is complete. There are two types that are currently supported: removeAccessApplyAction (default) and disableAndDeleteUserApplyAction. Field only needs to be specified in the case of disableAndDeleteUserApplyAction.
func (m *AccessReviewScheduleSettings) GetApplyActions()([]AccessReviewApplyActionable) {
    if m == nil {
        return nil
    } else {
        return m.applyActions
    }
}
// GetAutoApplyDecisionsEnabled gets the autoApplyDecisionsEnabled property value. Indicates whether decisions are automatically applied. When set to false, an admin must apply the decisions manually once the reviewer completes the access review. When set to true, decisions are applied automatically after the access review instance duration ends, whether or not the reviewers have responded. Default value is false.
func (m *AccessReviewScheduleSettings) GetAutoApplyDecisionsEnabled()(*bool) {
    if m == nil {
        return nil
    } else {
        return m.autoApplyDecisionsEnabled
    }
}
// GetDefaultDecision gets the defaultDecision property value. Decision chosen if defaultDecisionEnabled is enabled. Can be one of Approve, Deny, or Recommendation.
func (m *AccessReviewScheduleSettings) GetDefaultDecision()(*string) {
    if m == nil {
        return nil
    } else {
        return m.defaultDecision
    }
}
// GetDefaultDecisionEnabled gets the defaultDecisionEnabled property value. Indicates whether the default decision is enabled or disabled when reviewers do not respond. Default value is false.
func (m *AccessReviewScheduleSettings) GetDefaultDecisionEnabled()(*bool) {
    if m == nil {
        return nil
    } else {
        return m.defaultDecisionEnabled
    }
}
// GetFieldDeserializers the deserialization information for the current model.
// Each entry parses one JSON property and stores it on the model; nil parse
// results are skipped, so absent properties leave existing state untouched.
func (m *AccessReviewScheduleSettings) GetFieldDeserializers()(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error)) {
    res := make(map[string]func(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode)(error))
    res["applyActions"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetCollectionOfObjectValues(CreateAccessReviewApplyActionFromDiscriminatorValue)
        if err != nil {
            return err
        }
        if val != nil {
            // Re-type the generic Parsable collection to the concrete interface.
            res := make([]AccessReviewApplyActionable, len(val))
            for i, v := range val {
                res[i] = v.(AccessReviewApplyActionable)
            }
            m.SetApplyActions(res)
        }
        return nil
    }
    res["autoApplyDecisionsEnabled"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetBoolValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetAutoApplyDecisionsEnabled(val)
        }
        return nil
    }
    res["defaultDecision"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetStringValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetDefaultDecision(val)
        }
        return nil
    }
    res["defaultDecisionEnabled"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetBoolValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetDefaultDecisionEnabled(val)
        }
        return nil
    }
    res["instanceDurationInDays"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetInt32Value()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetInstanceDurationInDays(val)
        }
        return nil
    }
    res["justificationRequiredOnApproval"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetBoolValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetJustificationRequiredOnApproval(val)
        }
        return nil
    }
    res["mailNotificationsEnabled"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetBoolValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetMailNotificationsEnabled(val)
        }
        return nil
    }
    res["recommendationsEnabled"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetBoolValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetRecommendationsEnabled(val)
        }
        return nil
    }
    res["recurrence"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetObjectValue(CreatePatternedRecurrenceFromDiscriminatorValue)
        if err != nil {
            return err
        }
        if val != nil {
            m.SetRecurrence(val.(PatternedRecurrenceable))
        }
        return nil
    }
    res["reminderNotificationsEnabled"] = func (n i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.ParseNode) error {
        val, err := n.GetBoolValue()
        if err != nil {
            return err
        }
        if val != nil {
            m.SetReminderNotificationsEnabled(val)
        }
        return nil
    }
    return res
}
// GetInstanceDurationInDays gets the instanceDurationInDays property value. Duration of each recurrence of review (accessReviewInstance) in number of days. NOTE: If the stageSettings of the accessReviewScheduleDefinition object is defined, its durationInDays setting will be used instead of the value of this property.
func (m *AccessReviewScheduleSettings) GetInstanceDurationInDays()(*int32) {
    // Accessors tolerate a nil receiver and return the zero value.
    if m == nil {
        return nil
    } else {
        return m.instanceDurationInDays
    }
}
// GetJustificationRequiredOnApproval gets the justificationRequiredOnApproval property value. Indicates whether reviewers are required to provide justification with their decision. Default value is false.
func (m *AccessReviewScheduleSettings) GetJustificationRequiredOnApproval()(*bool) {
    if m == nil {
        return nil
    } else {
        return m.justificationRequiredOnApproval
    }
}
// GetMailNotificationsEnabled gets the mailNotificationsEnabled property value. Indicates whether emails are enabled or disabled. Default value is false.
func (m *AccessReviewScheduleSettings) GetMailNotificationsEnabled()(*bool) {
    if m == nil {
        return nil
    } else {
        return m.mailNotificationsEnabled
    }
}
// GetRecommendationsEnabled gets the recommendationsEnabled property value. Indicates whether decision recommendations are enabled or disabled. NOTE: If the stageSettings of the accessReviewScheduleDefinition object is defined, its recommendationsEnabled setting will be used instead of the value of this property.
func (m *AccessReviewScheduleSettings) GetRecommendationsEnabled()(*bool) {
    if m == nil {
        return nil
    } else {
        return m.recommendationsEnabled
    }
}
// GetRecurrence gets the recurrence property value. Detailed settings for recurrence using the standard Outlook recurrence object. Note: Only dayOfMonth, interval, and type (weekly, absoluteMonthly) properties are supported. Use the property startDate on recurrenceRange to determine the day the review starts.
func (m *AccessReviewScheduleSettings) GetRecurrence()(PatternedRecurrenceable) {
    if m == nil {
        return nil
    } else {
        return m.recurrence
    }
}
// GetReminderNotificationsEnabled gets the reminderNotificationsEnabled property value. Indicates whether reminders are enabled or disabled. Default value is false.
func (m *AccessReviewScheduleSettings) GetReminderNotificationsEnabled()(*bool) {
    if m == nil {
        return nil
    } else {
        return m.reminderNotificationsEnabled
    }
}
// Serialize serializes information the current object.
// applyActions is only written when non-nil; the remaining properties are
// written unconditionally. NOTE(review): how nil pointer values are emitted
// (absent vs. null) depends on the SerializationWriter implementation.
func (m *AccessReviewScheduleSettings) Serialize(writer i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.SerializationWriter)(error) {
    if m.GetApplyActions() != nil {
        // Down-cast the typed collection to the generic Parsable slice the writer expects.
        cast := make([]i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable, len(m.GetApplyActions()))
        for i, v := range m.GetApplyActions() {
            cast[i] = v.(i878a80d2330e89d26896388a3f487eef27b0a0e6c010c493bf80be1452208f91.Parsable)
        }
        err := writer.WriteCollectionOfObjectValues("applyActions", cast)
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteBoolValue("autoApplyDecisionsEnabled", m.GetAutoApplyDecisionsEnabled())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteStringValue("defaultDecision", m.GetDefaultDecision())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteBoolValue("defaultDecisionEnabled", m.GetDefaultDecisionEnabled())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteInt32Value("instanceDurationInDays", m.GetInstanceDurationInDays())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteBoolValue("justificationRequiredOnApproval", m.GetJustificationRequiredOnApproval())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteBoolValue("mailNotificationsEnabled", m.GetMailNotificationsEnabled())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteBoolValue("recommendationsEnabled", m.GetRecommendationsEnabled())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteObjectValue("recurrence", m.GetRecurrence())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteBoolValue("reminderNotificationsEnabled", m.GetReminderNotificationsEnabled())
        if err != nil {
            return err
        }
    }
    {
        err := writer.WriteAdditionalData(m.GetAdditionalData())
        if err != nil {
            return err
        }
    }
    return nil
}
// SetAdditionalData sets the additionalData property value. Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.
func (m *AccessReviewScheduleSettings) SetAdditionalData(value map[string]interface{})() {
    // All setters are no-ops on a nil receiver.
    if m != nil {
        m.additionalData = value
    }
}
// SetApplyActions sets the applyActions property value. Optional field. Describes the actions to take once a review is complete. There are two types that are currently supported: removeAccessApplyAction (default) and disableAndDeleteUserApplyAction. Field only needs to be specified in the case of disableAndDeleteUserApplyAction.
func (m *AccessReviewScheduleSettings) SetApplyActions(value []AccessReviewApplyActionable)() {
    if m != nil {
        m.applyActions = value
    }
}
// SetAutoApplyDecisionsEnabled sets the autoApplyDecisionsEnabled property value. Indicates whether decisions are automatically applied. When set to false, an admin must apply the decisions manually once the reviewer completes the access review. When set to true, decisions are applied automatically after the access review instance duration ends, whether or not the reviewers have responded. Default value is false.
func (m *AccessReviewScheduleSettings) SetAutoApplyDecisionsEnabled(value *bool)() {
    if m != nil {
        m.autoApplyDecisionsEnabled = value
    }
}
// SetDefaultDecision sets the defaultDecision property value. Decision chosen if defaultDecisionEnabled is enabled. Can be one of Approve, Deny, or Recommendation.
func (m *AccessReviewScheduleSettings) SetDefaultDecision(value *string)() {
    if m != nil {
        m.defaultDecision = value
    }
}
// SetDefaultDecisionEnabled sets the defaultDecisionEnabled property value. Indicates whether the default decision is enabled or disabled when reviewers do not respond. Default value is false.
func (m *AccessReviewScheduleSettings) SetDefaultDecisionEnabled(value *bool)() {
    if m != nil {
        m.defaultDecisionEnabled = value
    }
}
// SetInstanceDurationInDays sets the instanceDurationInDays property value. Duration of each recurrence of review (accessReviewInstance) in number of days. NOTE: If the stageSettings of the accessReviewScheduleDefinition object is defined, its durationInDays setting will be used instead of the value of this property.
func (m *AccessReviewScheduleSettings) SetInstanceDurationInDays(value *int32)() {
    if m != nil {
        m.instanceDurationInDays = value
    }
}
// SetJustificationRequiredOnApproval sets the justificationRequiredOnApproval property value. Indicates whether reviewers are required to provide justification with their decision. Default value is false.
func (m *AccessReviewScheduleSettings) SetJustificationRequiredOnApproval(value *bool)() {
    if m != nil {
        m.justificationRequiredOnApproval = value
    }
}
// SetMailNotificationsEnabled sets the mailNotificationsEnabled property value. Indicates whether emails are enabled or disabled. Default value is false.
func (m *AccessReviewScheduleSettings) SetMailNotificationsEnabled(value *bool)() {
    if m != nil {
        m.mailNotificationsEnabled = value
    }
}
// SetRecommendationsEnabled sets the recommendationsEnabled property value. Indicates whether decision recommendations are enabled or disabled. NOTE: If the stageSettings of the accessReviewScheduleDefinition object is defined, its recommendationsEnabled setting will be used instead of the value of this property.
func (m *AccessReviewScheduleSettings) SetRecommendationsEnabled(value *bool)() {
    if m != nil {
        m.recommendationsEnabled = value
    }
}
// SetRecurrence sets the recurrence property value. Detailed settings for recurrence using the standard Outlook recurrence object. Note: Only dayOfMonth, interval, and type (weekly, absoluteMonthly) properties are supported. Use the property startDate on recurrenceRange to determine the day the review starts.
func (m *AccessReviewScheduleSettings) SetRecurrence(value PatternedRecurrenceable)() {
    if m != nil {
        m.recurrence = value
    }
}
// SetReminderNotificationsEnabled sets the reminderNotificationsEnabled property value. Indicates whether reminders are enabled or disabled. Default value is false.
func (m *AccessReviewScheduleSettings) SetReminderNotificationsEnabled(value *bool)() {
if m != nil {
m.reminderNotificationsEnabled = value
}
} | models/access_review_schedule_settings.go | 0.745769 | 0.466603 | access_review_schedule_settings.go | starcoder |
package ahrs
import (
"log"
"math"
"fmt"
"github.com/skelterjohn/go.matrix"
)
// Kalman0State holds the state of the simplified "Kalman0" AHRS filter:
// the embedded common State plus the filter's working matrices, which are
// preallocated once and reused on every Compute call.
type Kalman0State struct {
    State
    f *matrix.DenseMatrix // state-transition Jacobian (filled by calcJacobianState)
    z *Measurement // predicted measurement (see predictMeasurement)
    y *matrix.DenseMatrix // innovation: actual minus predicted measurement
    h *matrix.DenseMatrix // measurement Jacobian (filled by calcJacobianMeasurement)
    ss *matrix.DenseMatrix // h*M*h' + R, inverted to form the gain
    kk *matrix.DenseMatrix // Kalman gain M*h'*ss^-1
}
// NewKalman0AHRS allocates a Kalman0State with initial orientation guesses
// and preallocated matrices; full initialization from the first measurement
// happens lazily in init (needsInitialization is set).
func NewKalman0AHRS() (s *Kalman0State) {
    s = new(Kalman0State)
    s.needsInitialization = true
    s.aNorm = 1
    s.E0 = 1 // Initial guess is East
    s.F0 = 1 // Initial guess is that it's oriented pointing forward and level
    s.normalize()
    // 32x32 state covariance / process-noise matrices; 15-row measurement space.
    s.M = matrix.Zeros(32, 32)
    s.N = matrix.Zeros(32, 32)
    s.f = matrix.Eye(32)
    s.z = NewMeasurement()
    s.y = matrix.Zeros(15, 1)
    s.h = matrix.Zeros(15, 32)
    s.ss = matrix.Zeros(32, 15)
    s.kk = matrix.Zeros(32, 15)
    s.logMap = make(map[string]interface{})
    s.updateLogMap(NewMeasurement(), s.logMap)
    s.gLoad = 1
    return
}
// init (re)initializes the filter from the first measurement m: resets the
// orientation quaternions, seeds the state covariance M with large initial
// uncertainties, and sets the per-second process noise N.
func (s *Kalman0State) init(m *Measurement) {
    s.needsInitialization = false
    s.E0, s.E1, s.E2, s.E3 = 1, 0, 0, 0 // Initial guess is East
    s.F0, s.F1, s.F2, s.F3 = 1, 0, 0, 0
    s.normalize()
    s.T = m.T
    // Diagonal matrix of initial state uncertainties, will be squared into covariance below
    // Specifics here aren't too important--it will change very quickly
    s.M = matrix.Diagonal([]float64{
        Big, Big, Big, // U*3
        Big, Big, Big, // Z*3
        1, 1, Big, Big, // E*4
        2, Big, Big, // H*3
        Big, Big, Big, // N*3
        Big, Big, Big, // V*3
        Big, Big, Big, // C*3
        Big, Big, Big, Big, // F*4
        2, Big, Big, // D*3
        Big, Big, Big, // L*3
    })
    s.M = matrix.Product(s.M, s.M)
    // Diagonal matrix of state process uncertainties per s, will be squared into covariance below
    // Tuning these is more important
    tt := math.Sqrt(60.0 * 60.0) // One-hour time constant for drift of biases V, C, F, D, L
    s.N = matrix.Diagonal([]float64{
        Big, Big, Big, // U*3
        Big, Big, Big, // Z*3
        0.05, 0.05, Big, Big, // E*4
        50, Big, Big, // H*3
        Big, Big, Big, // N*3
        Big, Big, Big, // V*3
        Big, Big, Big, // C*3
        Big, Big, Big, Big, // F*4
        0.1 / tt, Big, Big, // D*3
        Big, Big, Big, // L*3
    })
    s.N = matrix.Product(s.N, s.N)
    s.updateLogMap(m, s.logMap)
    log.Println("Kalman0 Initialized")
    return
}
// Compute runs first the prediction and then the update phases of the Kalman filter.
// The first call only initializes the filter from m; subsequent calls do the
// predict/update cycle and refresh the log map.
func (s *Kalman0State) Compute(m *Measurement) {
    // Rotate the accelerometer (A) and gyro (B) vectors by the F quaternion
    // before filtering.
    m.A1, m.A2, m.A3 = s.rotateByF(m.A1, m.A2, m.A3, false)
    m.B1, m.B2, m.B3 = s.rotateByF(m.B1, m.B2, m.B3, false)
    if s.needsInitialization {
        s.init(m)
        return
    }
    s.predict(m.T)
    s.update(m)
    s.updateLogMap(m, s.logMap)
}
// predict performs the prediction phase of the Kalman filter: it advances
// the orientation quaternion E by the heading rate H1 over dt and propagates
// the state covariance M = f*M*f' + N*dt.
func (s *Kalman0State) predict(t float64) {
    dt := t - s.T
    // State vectors H and D are unchanged; only E evolves.
    s.E0, s.E1, s.E2, s.E3 = QuaternionRotate(s.E0, s.E1, s.E2, s.E3, s.H1*dt*Deg, 0, 0)
    s.T = t
    s.calcJacobianState(t)
    s.M = matrix.Sum(matrix.Product(s.f, matrix.Product(s.M, s.f.Transpose())), matrix.Scaled(s.N, dt))
}
// predictMeasurement returns the measurement expected given the current state:
// gravity rotated into the sensor frame (A2, A3) and the gyro reading as
// heading rate plus bias (B1 = H1 + D1).
func (s *Kalman0State) predictMeasurement() (m *Measurement) {
    m = NewMeasurement()
    m.SValid = true
    _, m.A2, m.A3 = s.rotateByE(0, 0, 1, true)
    m.B1 = s.H1 + s.D1
    m.T = s.T
    return
}
// update applies the Kalman filter corrections given the measurements:
// forms the innovation y for A2/A3/B1, computes the gain kk, applies the
// correction to E0, E1, H1 and D1, and updates the covariance M.
func (s *Kalman0State) update(m *Measurement) {
    s.z = s.predictMeasurement()
    s.y.Set(7, 0, m.A2-s.z.A2)
    s.y.Set(8, 0, m.A3-s.z.A3)
    s.y.Set(9, 0, m.B1-s.z.B1)
    s.calcJacobianMeasurement()
    // Feed the raw readings into the accumulators to estimate measurement
    // noise variances for the R matrix (m.M).
    var v float64
    _, _, v = m.Accums[7](m.A2)
    m.M.Set(7, 7, v)
    _, _, v = m.Accums[8](m.A3)
    m.M.Set(8, 8, v)
    _, _, v = m.Accums[9](m.B1)
    m.M.Set(9, 9, v)
    s.ss = matrix.Sum(matrix.Product(s.h, matrix.Product(s.M, s.h.Transpose())), m.M)
    m2, err := s.ss.Inverse()
    if err != nil {
        // Singular innovation covariance: skip this update entirely.
        log.Println("AHRS: Can't invert Kalman gain matrix")
        log.Printf("ss: %s\n", s.ss)
        return
    }
    s.kk = matrix.Product(s.M, matrix.Product(s.h.Transpose(), m2))
    su := matrix.Product(s.kk, s.y)
    s.E0 += su.Get(6, 0)
    s.E1 += su.Get(7, 0)
    s.H1 += su.Get(10, 0)
    s.D1 += su.Get(26, 0)
    s.T = m.T
    s.M = matrix.Product(matrix.Difference(matrix.Eye(32), matrix.Product(s.kk, s.h)), s.M)
    s.normalize()
}
// calcJacobianState fills f with the partial derivatives of the predicted
// state with respect to the current state, for the E0/E1 quaternion
// evolution driven by H1 over dt.
func (s *Kalman0State) calcJacobianState(t float64) {
    dt := t - s.T
    // U*3, Z*3, E*4, H*3, N*3,
    // V*3, C*3, F*4, D*3, L*3
    // s.E0 += -0.5*dt*s.E1*s.H1*Deg
    s.f.Set(6, 7, -0.5*dt*s.H1*Deg) // E0/E1
    s.f.Set(6, 10, -0.5*dt*s.E1*Deg) // E0/H1
    //s.E1 += +0.5*dt*s.E0*s.H1*Deg
    s.f.Set(7, 6, 0.5*dt*s.H1*Deg) // E1/E0
    s.f.Set(7, 10, 0.5*dt*s.E0*Deg) // E1/H1
    // H and D are unchanged.
    return
}
// calcJacobianMeasurement fills h with the partial derivatives of the
// predicted measurement (A2, A3, B1) with respect to the state variables
// E0, E1, H1 and D1.
func (s *Kalman0State) calcJacobianMeasurement() {
    // U*3, Z*3, E*4, H*3, N*3,
    // V*3, C*3, F*4, D*3, L*3
    // U*3, W*3, A*3, B*3, M*3
    // m.A2 = 2*s.E0*s.E1
    s.h.Set(7, 6, 2*s.E1) // A2/E0
    s.h.Set(7, 7, 2*s.E0) // A2/E1
    // m.A3 = s.E0*s.E0 - s.E1*s.E1
    s.h.Set(8, 6, +2*s.E0) // A3/E0
    s.h.Set(8, 7, -2*s.E1) // A3/E1
    // m.B1 = s.H1 + s.D1
    s.h.Set(9, 10, 1) // B1/H1
    s.h.Set(9, 26, 1) // B1/D1
    return
}
// SetCalibrations sets the AHRS accelerometer calibrations to c and gyro calibrations to d.
// NOTE(review): this is currently a no-op stub — the calibration values are
// ignored. Confirm whether Kalman0 is intended to support calibrations.
func (s *Kalman0State) SetCalibrations(c, d *[3]float64) {
    return
}
// updateLogMap refreshes the analysis/logging map p with the base state's
// entries plus every element of the filter's working matrices, keyed as
// e.g. "M[03_07]", and the predicted-measurement components.
func (s *Kalman0State) updateLogMap(m *Measurement, p map[string]interface{}) {
    s.State.updateLogMap(m, s.logMap)
    /*
    rv, pv, hv := s.State.RollPitchHeadingUncertainty()
    p["PitchVar"] = pv / Deg
    */
    for k, v := range map[string]*matrix.DenseMatrix {
        "M": s.M, // M is the state uncertainty covariance matrix
        "N": s.N, // N is the process uncertainty covariance matrix
        "f": s.f, // f is the State Jacobian
        "y": s.y, // y is the correction between actual and predicted measurements
        "h": s.h, // h is the measurement Jacobian
        "ss": s.ss, // ss is the innovation covariance
        "kk": s.kk, // kk is the Kalman gain
    } {
        r, c := v.GetSize()
        for i := 0; i < r; i++ {
            for j := 0; j < c; j++ {
                p[fmt.Sprintf("%s[%02d_%02d]", k, i, j)] = v.Get(i, j)
            }
        }
    }
    // z is the predicted measurement
    p["zA1"] = s.z.A1
    p["zB2"] = s.z.B2
}
var Kalman0JSONConfig = `{
"State": [
["Pitch", "PitchActual", 0],
["T", null, null],
["E0", "E0Actual", null],
["E1", "E1Actual", null],
["H2", "H2Actual", 0],
["D2", "D2Actual", 0],
],
"Measurement": [
["A1", null, 0],
["B2", null, 0],
]
}` | ahrs/ahrs_kalman0.go | 0.747339 | 0.661797 | ahrs_kalman0.go | starcoder |
Package hilbert implements a Hilbert R-tree based on PALM principles
to improve multithreaded performance. This package is not quite complete,
and some optimization work and the deletion code remain to be finished.
This serves as a potential replacement for the interval tree and
rangetree.
Benchmarks:
BenchmarkBulkAddPoints-8 500 2589270 ns/op
BenchmarkBulkUpdatePoints-8 2000 1212641 ns/op
BenchmarkPointInsertion-8 200000 9135 ns/op
BenchmarkQueryPoints-8 500000 3122 ns/op
*/
package hilbert
import (
"runtime"
"sync"
"sync/atomic"
"github.com/Workiva/go-datastructures/queue"
"github.com/Workiva/go-datastructures/rtree"
)
// operation enumerates the kinds of actions the tree can batch together.
type operation int
const (
    get operation = iota
    add
    remove
)
const multiThreadAt = 1000 // number of keys before we multithread lookups
// keyBundle ties a Hilbert key to the rectangle(s) being inserted or
// removed; right is only populated when propagating a node split upward.
type keyBundle struct {
    key hilbert
    left, right rtree.Rectangle
}
// tree is a Hilbert R-tree whose mutations are batched PALM-style: actions
// are queued on a ring buffer and drained by a single runner goroutine.
// The `_ [8]uint64` fields are padding, presumably to keep the hot counters
// on separate cache lines (compare the "different cache lines" comment in
// fetchKeysInParallel).
type tree struct {
    root *node
    _ [8]uint64
    number uint64 // item count, maintained with atomics
    _ [8]uint64
    ary, bufferSize uint64
    actions *queue.RingBuffer // pending actions awaiting the runner
    cache []interface{} // scratch batch drained from actions
    _ [8]uint64
    disposed uint64 // set to 1 by Dispose
    _ [8]uint64
    running uint64 // CAS flag: 1 while a runner is active
}
// checkAndRun enqueues action (if non-nil) and, if no runner is currently
// active (CAS on tree.running), either drains a batch of queued actions
// into cache and starts an asynchronous runner, or executes a lone action
// directly. Gets are answered inline; large add/remove batches go through
// the multithreaded runner.
func (tree *tree) checkAndRun(action action) {
    if tree.actions.Len() > 0 {
        if action != nil {
            tree.actions.Put(action)
        }
        if atomic.CompareAndSwapUint64(&tree.running, 0, 1) {
            var a interface{}
            var err error
            // Drain up to bufferSize queued actions into the batch cache.
            for tree.actions.Len() > 0 {
                a, err = tree.actions.Get()
                if err != nil {
                    return
                }
                tree.cache = append(tree.cache, a)
                if uint64(len(tree.cache)) >= tree.bufferSize {
                    break
                }
            }
            go tree.operationRunner(tree.cache, true)
        }
    } else if action != nil {
        if atomic.CompareAndSwapUint64(&tree.running, 0, 1) {
            switch action.operation() {
            case get:
                ga := action.(*getAction)
                result := tree.search(ga.lookup)
                ga.result = result
                action.complete()
                tree.reset()
            case add, remove:
                // Multithread only when the batch is large enough to pay off.
                if len(action.keys()) > multiThreadAt {
                    tree.operationRunner(interfaces{action}, true)
                } else {
                    tree.operationRunner(interfaces{action}, false)
                }
            }
        } else {
            // A runner grabbed the flag first; queue and retry.
            tree.actions.Put(action)
            tree.checkAndRun(nil)
        }
    }
}
// init sets up an empty tree: a single leaf root with an empty bounding
// rectangle and an action ring buffer of the requested size.
func (tree *tree) init(bufferSize, ary uint64) {
    tree.bufferSize = bufferSize
    tree.ary = ary
    tree.cache = make([]interface{}, 0, bufferSize)
    tree.root = newNode(true, newKeys(ary), newNodes(ary))
    tree.root.mbr = &rectangle{}
    tree.actions = queue.NewRingBuffer(tree.bufferSize)
}
// operationRunner executes one batch of actions: it resolves each action's
// target leaf nodes, applies the grouped writes/deletes bottom-up, signals
// completion to waiters, and releases the runner flag via reset.
func (tree *tree) operationRunner(xns interfaces, threaded bool) {
    writeOperations, deleteOperations, toComplete := tree.fetchKeys(xns, threaded)
    tree.recursiveMutate(writeOperations, deleteOperations, false, threaded)
    for _, a := range toComplete {
        a.complete()
    }
    tree.reset()
}
// fetchKeys resolves, for every action in the batch, which leaf node each
// key belongs to (serially or in parallel), then groups the pending inserts
// and deletes by target node. Gets are completed immediately; add/remove
// actions are returned so they can be completed after mutation.
func (tree *tree) fetchKeys(xns interfaces, inParallel bool) (map[*node][]*keyBundle, map[*node][]*keyBundle, actions) {
    if inParallel {
        tree.fetchKeysInParallel(xns)
    } else {
        tree.fetchKeysInSerial(xns)
    }
    writeOperations := make(map[*node][]*keyBundle)
    deleteOperations := make(map[*node][]*keyBundle)
    toComplete := make(actions, 0, len(xns)/2)
    for _, ifc := range xns {
        action := ifc.(action)
        switch action.operation() {
        case add:
            for i, n := range action.nodes() {
                writeOperations[n] = append(writeOperations[n], &keyBundle{key: action.rects()[i].hilbert, left: action.rects()[i].rect})
            }
            toComplete = append(toComplete, action)
        case remove:
            for i, n := range action.nodes() {
                deleteOperations[n] = append(deleteOperations[n], &keyBundle{key: action.rects()[i].hilbert, left: action.rects()[i].rect})
            }
            toComplete = append(toComplete, action)
        case get:
            action.complete()
        }
    }
    return writeOperations, deleteOperations, toComplete
}
// fetchKeysInSerial walks the batch on the calling goroutine, recording the
// target leaf node for every add/remove key and answering gets inline.
func (tree *tree) fetchKeysInSerial(xns interfaces) {
    for _, ifc := range xns {
        action := ifc.(action)
        switch action.operation() {
        case add, remove:
            for i, key := range action.rects() {
                n := getParent(tree.root, key.hilbert, key.rect)
                action.addNode(int64(i), n)
            }
        case get:
            ga := action.(*getAction)
            rects := tree.search(ga.lookup)
            ga.result = rects
        }
    }
}
// reset clears the batch cache (nil-ing entries so they can be collected
// while keeping the capacity), releases the runner flag, and re-checks the
// queue in case more actions arrived while this batch ran.
func (tree *tree) reset() {
    for i := range tree.cache {
        tree.cache[i] = nil
    }
    tree.cache = tree.cache[:0]
    atomic.StoreUint64(&tree.running, 0)
    tree.checkAndRun(nil)
}
func (tree *tree) fetchKeysInParallel(xns []interface{}) {
var forCache struct {
i int64
buffer [8]uint64 // different cache lines
js []int64
}
for j := 0; j < len(xns); j++ {
forCache.js = append(forCache.js, -1)
}
numCPU := runtime.NumCPU()
if numCPU > 1 {
numCPU--
}
var wg sync.WaitGroup
wg.Add(numCPU)
for k := 0; k < numCPU; k++ {
go func() {
for {
index := atomic.LoadInt64(&forCache.i)
if index >= int64(len(xns)) {
break
}
action := xns[index].(action)
j := atomic.AddInt64(&forCache.js[index], 1)
if j > int64(len(action.rects())) { // someone else is updating i
continue
} else if j == int64(len(action.rects())) {
atomic.StoreInt64(&forCache.i, index+1)
continue
}
switch action.operation() {
case add, remove:
hb := action.rects()[j]
n := getParent(tree.root, hb.hilbert, hb.rect)
action.addNode(j, n)
case get:
ga := action.(*getAction)
result := tree.search(ga.lookup)
ga.result = result
}
}
wg.Done()
}()
}
wg.Wait()
}
// splitNode splits an over-full node n into ary-sized pieces, appending the
// promoted separator keys and the resulting sibling pairs to keys/nodes so
// the caller can insert them into parent. No-op if n does not need a split.
func (tree *tree) splitNode(n, parent *node, nodes *[]*node, keys *hilberts) {
    if !n.needsSplit(tree.ary) {
        return
    }
    length := n.keys.len()
    splitAt := tree.ary - 1
    for i := splitAt; i < length; i += splitAt {
        offset := length - i
        k, left, right := n.split(offset, tree.ary)
        left.right = right
        *keys = append(*keys, k)
        *nodes = append(*nodes, left, right)
        left.parent = parent
        right.parent = parent
    }
}
// applyNode applies the grouped deletes and then inserts to a single node,
// keeping the tree's item counter in sync for leaf-level changes.
func (tree *tree) applyNode(n *node, adds, deletes []*keyBundle) {
    for _, kb := range deletes {
        if n.keys.len() == 0 {
            break
        }
        deleted := n.delete(kb)
        if deleted != nil {
            // Adding ^uint64(0) (i.e. max uint64) decrements via wraparound.
            atomic.AddUint64(&tree.number, ^uint64(0))
        }
    }
    for _, kb := range adds {
        old := n.insert(kb)
        // Only count a new leaf entry; replacing an existing key is not growth.
        if n.isLeaf && old == nil {
            atomic.AddUint64(&tree.number, 1)
        }
    }
}
// recursiveMutate applies one tree level's worth of grouped adds/deletes,
// collects any node splits into a fresh batch of separator insertions for
// the parent level, and recurses upward until no more changes propagate.
// setRoot indicates that the level being processed contains (or creates)
// the root; dummyRoot absorbs splits of a node that had no parent.
func (tree *tree) recursiveMutate(adds, deletes map[*node][]*keyBundle, setRoot, inParallel bool) {
    if len(adds) == 0 && len(deletes) == 0 {
        return
    }
    if setRoot && len(adds) > 1 {
        panic(`SHOULD ONLY HAVE ONE ROOT`)
    }
    // Gather the distinct nodes touched at this level (each at most once).
    ifs := make(interfaces, 0, len(adds))
    for n := range adds {
        if n.parent == nil {
            setRoot = true
        }
        ifs = append(ifs, n)
    }
    for n := range deletes {
        if n.parent == nil {
            setRoot = true
        }
        if _, ok := adds[n]; !ok {
            ifs = append(ifs, n)
        }
    }
    var dummyRoot *node
    if setRoot {
        dummyRoot = &node{
            keys: newKeys(tree.ary),
            nodes: newNodes(tree.ary),
            mbr: &rectangle{},
        }
    }
    // write guards the next-level maps while nodes are mutated concurrently.
    var write sync.Mutex
    nextLayerWrite := make(map[*node][]*keyBundle)
    nextLayerDelete := make(map[*node][]*keyBundle)
    var mutate func(interfaces, func(interface{}))
    if inParallel {
        mutate = executeInterfacesInParallel
    } else {
        mutate = executeInterfacesInSerial
    }
    mutate(ifs, func(ifc interface{}) {
        n := ifc.(*node)
        adds := adds[n]
        deletes := deletes[n]
        if len(adds) == 0 && len(deletes) == 0 {
            return
        }
        if setRoot {
            tree.root = n
        }
        parent := n.parent
        if parent == nil {
            parent = dummyRoot
            setRoot = true
        }
        tree.applyNode(n, adds, deletes)
        if n.needsSplit(tree.ary) {
            keys := make(hilberts, 0, n.keys.len())
            nodes := make([]*node, 0, n.nodes.len())
            tree.splitNode(n, parent, &nodes, &keys)
            write.Lock()
            for i, k := range keys {
                // Each promoted key carries its left/right split halves.
                nextLayerWrite[parent] = append(nextLayerWrite[parent], &keyBundle{key: k, left: nodes[i*2], right: nodes[i*2+1]})
            }
            write.Unlock()
        }
    })
    tree.recursiveMutate(nextLayerWrite, nextLayerDelete, setRoot, inParallel)
}
// Insert adds the provided rectangles to the tree, blocking until the
// insert action has been fully applied.
func (tree *tree) Insert(rects ...rtree.Rectangle) {
	action := newInsertAction(rects)
	tree.checkAndRun(action)
	action.completer.Wait()
}
// Delete removes the provided rectangles from the tree, blocking until the
// remove action has been fully applied. Keys with no match are a no-op.
func (tree *tree) Delete(rects ...rtree.Rectangle) {
	action := newRemoveAction(rects)
	tree.checkAndRun(action)
	action.completer.Wait()
}
// search performs a breadth-first traversal from the root, collecting every
// rectangle that intersects r.
func (tree *tree) search(r *rectangle) rtree.Rectangles {
	if tree.root == nil {
		return rtree.Rectangles{}
	}
	result := make(rtree.Rectangles, 0, 10)
	// whs acts as a FIFO queue of intersecting candidates: internal nodes
	// are expanded back onto the queue, leaves are collected into result.
	whs := tree.root.searchRects(r)
	for len(whs) > 0 {
		wh := whs[0]
		if n, ok := wh.(*node); ok {
			whs = append(whs, n.searchRects(r)...)
		} else {
			result = append(result, wh)
		}
		whs = whs[1:]
	}
	return result
}
// Search returns all rectangles in the tree that intersect the provided
// rectangle, blocking until the query action has completed.
func (tree *tree) Search(rect rtree.Rectangle) rtree.Rectangles {
	action := newGetAction(rect)
	tree.checkAndRun(action)
	action.completer.Wait()
	return action.result
}
// Len returns the number of items in the tree.
func (tree *tree) Len() uint64 {
	// Loaded atomically because mutations update the count concurrently.
	return atomic.LoadUint64(&tree.number)
}
// Dispose will clean up any resources used by this tree. This
// must be called to prevent a memory leak.
func (tree *tree) Dispose() {
	tree.actions.Dispose()
	// Flag disposal so concurrently running operations can observe shutdown.
	atomic.StoreUint64(&tree.disposed, 1)
}
// newTree constructs and initializes a tree with the given action buffer
// size and node arity.
func newTree(bufferSize, ary uint64) *tree {
	t := &tree{}
	t.init(bufferSize, ary)
	return t
}
// New will construct a new Hilbert R-Tree and return it.
func New(bufferSize, ary uint64) rtree.RTree {
return newTree(bufferSize, ary)
} | vendor/src/github.com/Workiva/go-datastructures/rtree/hilbert/tree.go | 0.5564 | 0.509032 | tree.go | starcoder |
package ximage
import (
"image"
"image/color"
"tawesoft.co.uk/go/ximage/xcolor"
)
// RGB is an in-memory image whose At method returns color.RGB values.
type RGB struct {
	// Pix holds the image's pixels as R, G, B byte triples. The pixel at
	// (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*3]
	// (three bytes per pixel, as allocated by NewRGB and read by RGBAt;
	// the previous comment's "*2" factor did not match the layout).
	Pix []uint8
	// Stride is the Pix stride (in bytes) between vertically adjacent pixels.
	Stride int
	// Rect is the image's bounds.
	Rect image.Rectangle
}
// ColorModel returns the RGB color model.
func (p *RGB) ColorModel() color.Model { return xcolor.RGBModel }

// Bounds returns the image's bounds.
func (p *RGB) Bounds() image.Rectangle { return p.Rect }

// At returns the color of the pixel at (x, y) as a color.Color.
func (p *RGB) At(x, y int) color.Color {
	return p.RGBAt(x, y)
}
// RGBAt returns the xcolor.RGB value of the pixel at (x, y). Points outside
// the image bounds yield the zero RGB value.
func (p *RGB) RGBAt(x, y int) xcolor.RGB {
	pt := image.Point{x, y}
	if !pt.In(p.Rect) {
		return xcolor.RGB{}
	}
	o := p.PixOffset(x, y)
	r, g, b := p.Pix[o], p.Pix[o+1], p.Pix[o+2]
	return xcolor.RGB{R: r, G: g, B: b}
}
// PixOffset returns the index of the first element of Pix that corresponds to
// the pixel at (x, y).
func (p *RGB) PixOffset(x, y int) int {
	// Each pixel occupies 3 bytes (R, G, B), matching the 3*w stride used by
	// NewRGB and the i, i+1, i+2 reads in RGBAt. The previous *1 factor made
	// horizontally adjacent pixels overlap by two bytes.
	return (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*3
}
// Set assigns the color at (x, y), converting c through the RGB color model.
// Calls outside the image bounds are silently ignored.
func (p *RGB) Set(x, y int, c color.Color) {
	if !(image.Point{x, y}.In(p.Rect)) {
		return
	}
	i := p.PixOffset(x, y)
	// NOTE(review): this asserts the model's result to color.RGBA. If
	// xcolor.RGBModel converts to xcolor.RGB instead, this assertion would
	// panic at runtime — confirm against the xcolor package.
	rgba := xcolor.RGBModel.Convert(c).(color.RGBA)
	p.Pix[i] = rgba.R
	p.Pix[i+1] = rgba.G
	p.Pix[i+2] = rgba.B
}
// SetRGB stores the xcolor.RGB value c at pixel (x, y). Points outside the
// image bounds are silently ignored.
func (p *RGB) SetRGB(x, y int, c xcolor.RGB) {
	pt := image.Point{x, y}
	if !pt.In(p.Rect) {
		return
	}
	o := p.PixOffset(x, y)
	p.Pix[o], p.Pix[o+1], p.Pix[o+2] = c.R, c.G, c.B
}
// SubImage returns an image representing the portion of the image p visible
// through r. The returned value shares pixels with the original image.
func (p *RGB) SubImage(r image.Rectangle) image.Image {
	clipped := r.Intersect(p.Rect)
	// An empty intersection is not guaranteed to lie inside either input
	// rectangle, so PixOffset on it could index past the end of Pix; return
	// a fresh empty image instead.
	if clipped.Empty() {
		return &RGB{}
	}
	offset := p.PixOffset(clipped.Min.X, clipped.Min.Y)
	sub := &RGB{
		Pix:    p.Pix[offset:],
		Stride: p.Stride,
		Rect:   clipped,
	}
	return sub
}
// Opaque scans the entire image and reports whether it is fully opaque.
// RGB has no alpha channel, so this is unconditionally true.
func (p *RGB) Opaque() bool {
	return true
}
// NewRGB returns a new RGB image with the given bounds.
func NewRGB(r image.Rectangle) *RGB {
w, h := r.Dx(), r.Dy()
pix := make([]uint8, 3 * w * h)
return &RGB{pix, 3 * w, r}
} | ximage/rgb.go | 0.888105 | 0.572902 | rgb.go | starcoder |
package bullet
// #cgo pkg-config: bullet
// #cgo windows LDFLAGS: -Wl,--allow-multiple-definition
// #include "bulletglue.h"
import "C"
import (
"github.com/fcvarela/gosg/core"
"github.com/go-gl/mathgl/mgl64"
"github.com/golang/glog"
)
// init registers this Bullet-backed implementation as the engine's active
// physics system.
func init() {
	core.SetPhysicsSystem(&PhysicsSystem{})
}
// Convenience conversions between mathgl types and Bullet's C types.

// vec3ToBullet converts an mgl64.Vec3 to a Bullet plVector3.
func vec3ToBullet(vec mgl64.Vec3) (out C.plVector3) {
	out[0] = C.plReal(vec.X())
	out[1] = C.plReal(vec.Y())
	out[2] = C.plReal(vec.Z())
	return out
}

// quatToBullet converts an mgl64.Quat to a Bullet plQuaternion (x, y, z, w).
func quatToBullet(quat mgl64.Quat) (out C.plQuaternion) {
	out[0] = C.plReal(quat.X())
	out[1] = C.plReal(quat.Y())
	out[2] = C.plReal(quat.Z())
	out[3] = C.plReal(quat.W)
	return out
}

// mat4ToBullet converts a column-major mgl64.Mat4 to a Bullet 16-element array.
func mat4ToBullet(mat mgl64.Mat4) (out [16]C.plReal) {
	for x := 0; x < 16; x++ {
		out[x] = C.plReal(mat[x])
	}
	return out
}

// mat4FromBullet converts a Bullet 16-element array back to an mgl64.Mat4.
func mat4FromBullet(mat [16]C.plReal) (out mgl64.Mat4) {
	for x := 0; x < 16; x++ {
		out[x] = float64(mat[x])
	}
	return out
}
// PhysicsSystem implements the core.PhysicsSystem interface by wrapping the Bullet physics library.
type PhysicsSystem struct {
	// sdk is the Bullet SDK handle created by Start.
	sdk C.plPhysicsSdkHandle
	// world is the dynamics world all bodies are simulated in.
	world C.plDynamicsWorldHandle
}
// Start implements the core.PhysicsSystem interface. It creates the Bullet
// SDK handle and a dynamics world with gravity initially disabled.
func (p *PhysicsSystem) Start() {
	glog.Info("Starting")
	// create an sdk handle
	p.sdk = C.plNewBulletSdk()
	// instance a world
	p.world = C.plCreateDynamicsWorld(p.sdk)
	// Gravity defaults to zero; callers enable it via SetGravity.
	C.plSetGravity(p.world, 0.0, 0.0, 0.0)
}
// Stop implements the core.PhysicsSystem interface. It tears down the world
// and SDK handles created in Start, in that order.
func (p *PhysicsSystem) Stop() {
	glog.Info("Stopping")
	C.plDeleteDynamicsWorld(p.world)
	C.plDeletePhysicsSdk(p.sdk)
}
// SetGravity implements the core.PhysicsSystem interface
func (p *PhysicsSystem) SetGravity(g mgl64.Vec3) {
	vec := vec3ToBullet(g)
	C.plSetGravity(p.world, vec[0], vec[1], vec[2])
}
// Update implements the core.PhysicsSystem interface. It pushes each node's
// world transform into its rigid body, steps the simulation by dt, then
// copies the simulated transforms back onto the nodes.
// fixme: remove gosg dependencies by passing a RigidBodyVec instead of NodeVec
func (p *PhysicsSystem) Update(dt float64, nodes []*core.Node) {
	for _, n := range nodes {
		n.RigidBody().SetTransform(n.WorldTransform())
	}
	C.plStepSimulation(p.world, C.plReal(dt))
	for _, n := range nodes {
		n.SetWorldTransform(n.RigidBody().GetTransform())
	}
}
// AddRigidBody implements the core.PhysicsSystem interface
func (p *PhysicsSystem) AddRigidBody(rigidBody core.RigidBody) {
	C.plAddRigidBody(p.world, rigidBody.(RigidBody).handle)
}

// RemoveRigidBody implements the core.PhysicsSystem interface
func (p *PhysicsSystem) RemoveRigidBody(rigidBody core.RigidBody) {
	C.plRemoveRigidBody(p.world, rigidBody.(RigidBody).handle)
}

// CreateRigidBody implements the core.PhysicsSystem interface.
// The body is created but not added to the world; see AddRigidBody.
func (p *PhysicsSystem) CreateRigidBody(mass float32, shape core.CollisionShape) core.RigidBody {
	body := C.plCreateRigidBody(nil, C.float(mass), shape.(CollisionShape).handle)
	r := RigidBody{body}
	return r
}

// DeleteRigidBody implements the core.PhysicsSystem interface
func (p *PhysicsSystem) DeleteRigidBody(body core.RigidBody) {
	C.plDeleteRigidBody(body.(RigidBody).handle)
}
// NewStaticPlaneShape implements the core.PhysicsSystem interface.
// NOTE(review): constant is passed as C.float while the other constructors
// use C.plReal — confirm plNewStaticPlaneShape's signature really takes float.
func (p *PhysicsSystem) NewStaticPlaneShape(normal mgl64.Vec3, constant float64) core.CollisionShape {
	vec := vec3ToBullet(normal)
	return CollisionShape{C.plNewStaticPlaneShape(&vec[0], C.float(constant))}
}

// NewSphereShape implements the core.PhysicsSystem interface
func (p *PhysicsSystem) NewSphereShape(radius float64) core.CollisionShape {
	return CollisionShape{C.plNewSphereShape(C.plReal(radius))}
}

// NewBoxShape implements the core.PhysicsSystem interface
func (p *PhysicsSystem) NewBoxShape(box mgl64.Vec3) core.CollisionShape {
	vec := vec3ToBullet(box)
	return CollisionShape{C.plNewBoxShape(vec[0], vec[1], vec[2])}
}

// NewCapsuleShape implements the core.PhysicsSystem interface
func (p *PhysicsSystem) NewCapsuleShape(radius float64, height float64) core.CollisionShape {
	return CollisionShape{C.plNewCapsuleShape(C.plReal(radius), C.plReal(height))}
}

// NewConeShape implements the core.PhysicsSystem interface
func (p *PhysicsSystem) NewConeShape(radius float64, height float64) core.CollisionShape {
	return CollisionShape{C.plNewConeShape(C.plReal(radius), C.plReal(height))}
}

// NewCylinderShape implements the core.PhysicsSystem interface
func (p *PhysicsSystem) NewCylinderShape(radius float64, height float64) core.CollisionShape {
	return CollisionShape{C.plNewCylinderShape(C.plReal(radius), C.plReal(height))}
}

// NewCompoundShape implements the core.PhysicsSystem interface
func (p *PhysicsSystem) NewCompoundShape() core.CollisionShape {
	return CollisionShape{C.plNewCompoundShape()}
}

// NewConvexHullShape implements the core.PhysicsSystem interface
func (p *PhysicsSystem) NewConvexHullShape() core.CollisionShape {
	return CollisionShape{C.plNewConvexHullShape()}
}
// NewStaticTriangleMeshShape implements the core.PhysicsSystem interface.
// Currently unimplemented: it always returns nil. The commented-out code
// sketches the intended mesh-to-Bullet triangle conversion.
func (p *PhysicsSystem) NewStaticTriangleMeshShape(mesh core.Mesh) core.CollisionShape {
	/*
		bulletMeshInterface := C.plNewMeshInterface()
		// add triangles
		for v := 0; v < len(indices); v += 3 {
			i1 := indices[v+0]
			i2 := indices[v+1]
			i3 := indices[v+2]
			v1 := vec3_to_bullet(positions[i1*3])
			v2 := vec3_to_bullet(positions[i2*3])
			v3 := vec3_to_bullet(positions[i3*3])
			C.plAddTriangle(bulletMeshInterface, &v1[0], &v2[0], &v3[0])
		}
		return CollisionShape{C.plNewStaticTriangleMeshShape(bulletMeshInterface)}
	*/
	return nil
}
// DeleteShape implements the core.PhysicsSystem interface
func (p *PhysicsSystem) DeleteShape(shape core.CollisionShape) {
	C.plDeleteShape(shape.(CollisionShape).handle)
}
// RigidBody implements the core.RigidBody interface as a thin wrapper around
// a Bullet rigid body handle.
type RigidBody struct {
	handle C.plRigidBodyHandle
}

// GetTransform implements the core.RigidBody interface. It reads the body's
// transform from Bullet as a 16-element OpenGL-style matrix.
func (r RigidBody) GetTransform() mgl64.Mat4 {
	mat := mat4ToBullet(mgl64.Ident4())
	C.plGetOpenGLMatrix(r.handle, &mat[0])
	return mat4FromBullet(mat)
}

// SetTransform implements the core.RigidBody interface
func (r RigidBody) SetTransform(transform mgl64.Mat4) {
	mat := mat4ToBullet(transform)
	C.plSetOpenGLMatrix(r.handle, &mat[0])
}

// ApplyImpulse implements the core.RigidBody interface. localPoint is the
// application point passed through to Bullet alongside the impulse vector.
func (r RigidBody) ApplyImpulse(impulse mgl64.Vec3, localPoint mgl64.Vec3) {
	i := vec3ToBullet(impulse)
	p := vec3ToBullet(localPoint)
	C.plApplyImpulse(r.handle, &i[0], &p[0])
}
// CollisionShape implements the core.CollisionShape interface as a thin
// wrapper around a Bullet collision shape handle.
type CollisionShape struct {
	handle C.plCollisionShapeHandle
}

// AddChildShape implements the core.CollisionShape interface, attaching s to
// this (compound) shape at position p with orientation o.
func (c CollisionShape) AddChildShape(s core.CollisionShape, p mgl64.Vec3, o mgl64.Quat) {
	vec := vec3ToBullet(p)
	quat := quatToBullet(o)
	C.plAddChildShape(c.handle, s.(CollisionShape).handle, &vec[0], &quat[0])
}
// AddVertex implements the core.CollisionShape interface
func (c CollisionShape) AddVertex(v mgl64.Vec3) {
C.plAddVertex(c.handle, C.plReal(v.X()), C.plReal(v.Y()), C.plReal(v.Z()))
} | physics/bullet/physicssystem.go | 0.753013 | 0.543833 | physicssystem.go | starcoder |
package attacherdetacher
import (
"fmt"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/controller/volume/cache"
"k8s.io/kubernetes/pkg/util/goroutinemap"
"k8s.io/kubernetes/pkg/volume"
)
// AttacherDetacher defines a set of operations for attaching or detaching a
// volume from a node.
type AttacherDetacher interface {
	// Spawns a new goroutine to execute volume-specific logic to attach the
	// volume to the node specified in the volumeToAttach.
	// Once attachment completes successfully, the actualStateOfWorld is updated
	// to indicate the volume is attached to the node.
	// If there is an error indicating the volume is already attached to the
	// specified node, attachment is assumed to be successful (plugins are
	// responsible for implementing this behavior).
	// All other errors are logged and the goroutine terminates without updating
	// actualStateOfWorld (caller is responsible for retrying as needed).
	AttachVolume(volumeToAttach *cache.VolumeToAttach, actualStateOfWorld cache.ActualStateOfWorld) error
	// Spawns a new goroutine to execute volume-specific logic to detach the
	// volume from the node specified in volumeToDetach.
	// Once detachment completes successfully, the actualStateOfWorld is updated
	// to remove the volume/node combo.
	// If there is an error indicating the volume is already detached from the
	// specified node, detachment is assumed to be successful (plugins are
	// responsible for implementing this behavior).
	// All other errors are logged and the goroutine terminates without updating
	// actualStateOfWorld (caller is responsible for retrying as needed).
	DetachVolume(volumeToDetach *cache.AttachedVolume, actualStateOfWorld cache.ActualStateOfWorld) error
}
// NewAttacherDetacher returns a new instance of AttacherDetacher.
func NewAttacherDetacher(volumePluginMgr *volume.VolumePluginMgr) AttacherDetacher {
	ad := &attacherDetacher{
		volumePluginMgr:   volumePluginMgr,
		pendingOperations: goroutinemap.NewGoRoutineMap(),
	}
	return ad
}
// attacherDetacher is the concrete implementation of AttacherDetacher.
type attacherDetacher struct {
	// volumePluginMgr is the volume plugin manager used to create volume
	// plugin objects.
	volumePluginMgr *volume.VolumePluginMgr
	// pendingOperations keeps track of pending attach and detach operations so
	// multiple operations are not started on the same volume
	pendingOperations goroutinemap.GoRoutineMap
}
// AttachVolume builds the attach operation for the given volume/node pair and
// schedules it on the pending-operations map, keyed by volume name so that
// concurrent operations on the same volume are rejected.
func (ad *attacherDetacher) AttachVolume(
	volumeToAttach *cache.VolumeToAttach,
	actualStateOfWorld cache.ActualStateOfWorld) error {
	operation, err := ad.generateAttachVolumeFunc(volumeToAttach, actualStateOfWorld)
	if err != nil {
		return err
	}
	return ad.pendingOperations.Run(volumeToAttach.VolumeName, operation)
}
// DetachVolume builds the detach operation for the given attached volume and
// schedules it on the pending-operations map, keyed by volume name so that
// concurrent operations on the same volume are rejected.
func (ad *attacherDetacher) DetachVolume(
	volumeToDetach *cache.AttachedVolume,
	actualStateOfWorld cache.ActualStateOfWorld) error {
	operation, err := ad.generateDetachVolumeFunc(volumeToDetach, actualStateOfWorld)
	if err != nil {
		return err
	}
	return ad.pendingOperations.Run(volumeToDetach.VolumeName, operation)
}
// generateAttachVolumeFunc resolves the attachable plugin and attacher for
// the volume up front, returning a closure that performs the actual attach
// and records success in the actual state of world.
func (ad *attacherDetacher) generateAttachVolumeFunc(
	volumeToAttach *cache.VolumeToAttach,
	actualStateOfWorld cache.ActualStateOfWorld) (func() error, error) {
	// Get attacher plugin
	attachableVolumePlugin, err := ad.volumePluginMgr.FindAttachablePluginBySpec(volumeToAttach.VolumeSpec)
	if err != nil || attachableVolumePlugin == nil {
		return nil, fmt.Errorf(
			"failed to get AttachablePlugin from volumeSpec for volume %q err=%v",
			volumeToAttach.VolumeSpec.Name(),
			err)
	}
	volumeAttacher, newAttacherErr := attachableVolumePlugin.NewAttacher()
	if newAttacherErr != nil {
		return nil, fmt.Errorf(
			"failed to get NewAttacher from volumeSpec for volume %q err=%v",
			volumeToAttach.VolumeSpec.Name(),
			newAttacherErr)
	}
	return func() error {
		// Execute attach
		attachErr := volumeAttacher.Attach(volumeToAttach.VolumeSpec, volumeToAttach.NodeName)
		if attachErr != nil {
			// On failure, just log and exit. The controller will retry
			glog.Errorf("Attach operation for %q failed with: %v", volumeToAttach.VolumeName, attachErr)
			return attachErr
		}
		// Update actual state of world
		_, addVolumeNodeErr := actualStateOfWorld.AddVolumeNode(volumeToAttach.VolumeSpec, volumeToAttach.NodeName)
		if addVolumeNodeErr != nil {
			// On failure, just log and exit. The controller will retry
			glog.Errorf("Attach operation for %q succeeded but updating actualStateOfWorld failed with: %v", volumeToAttach.VolumeName, addVolumeNodeErr)
			return addVolumeNodeErr
		}
		return nil
	}, nil
}
func (ad *attacherDetacher) generateDetachVolumeFunc(
volumeToDetach *cache.AttachedVolume,
actualStateOfWorld cache.ActualStateOfWorld) (func() error, error) {
// Get attacher plugin
attachableVolumePlugin, err := ad.volumePluginMgr.FindAttachablePluginBySpec(volumeToDetach.VolumeSpec)
if err != nil || attachableVolumePlugin == nil {
return nil, fmt.Errorf(
"failed to get AttachablePlugin from volumeSpec for volume %q err=%v",
volumeToDetach.VolumeSpec.Name(),
err)
}
deviceName, err := attachableVolumePlugin.GetDeviceName(volumeToDetach.VolumeSpec)
if err != nil {
return nil, fmt.Errorf(
"failed to GetUniqueVolumeName from AttachablePlugin for volumeSpec %q err=%v",
volumeToDetach.VolumeSpec.Name(),
err)
}
volumeDetacher, err := attachableVolumePlugin.NewDetacher()
if err != nil {
return nil, fmt.Errorf(
"failed to get NewDetacher from volumeSpec for volume %q err=%v",
volumeToDetach.VolumeSpec.Name(),
err)
}
return func() error {
// Execute detach
detachErr := volumeDetacher.Detach(deviceName, volumeToDetach.NodeName)
if detachErr != nil {
// On failure, just log and exit. The controller will retry
glog.Errorf("Detach operation for %q failed with: %v", volumeToDetach.VolumeName, detachErr)
return detachErr
}
// TODO: Reset "safe to detach" annotation on Node
// Update actual state of world
actualStateOfWorld.DeleteVolumeNode(volumeToDetach.VolumeName, volumeToDetach.NodeName)
return nil
}, nil
} | pkg/controller/volume/attacherdetacher/attacher_detacher.go | 0.688573 | 0.466603 | attacher_detacher.go | starcoder |
package value
import (
"math/big"
"robpike.io/ivy/config"
)
// power computes u**v. When the base is negative and the exponent lies
// strictly between -1 and 1, the real result is undefined, so the
// computation is routed through complex arithmetic instead.
func power(c Context, u, v Value) Value {
	x := floatSelf(c, u).(BigFloat)
	exp := floatSelf(c, v).(BigFloat)
	if x.Sign() < 0 && exp.Cmp(floatMinusOne) > 0 && exp.Cmp(floatOne) < 0 {
		// Complex solution when x<0 and exp<1
		return newComplexReal(u).Pow(c, newComplexReal(v))
	}
	z := floatPower(c, x, exp)
	return BigFloat{z}.shrink()
}
// exp computes e**u, shrinking the big-float result to the smallest
// representation that holds it.
func exp(c Context, u Value) Value {
	z := exponential(c.Config(), floatSelf(c, u).(BigFloat).Float)
	return BigFloat{z}.shrink()
}
// floatPower computes bx to the power of bexp. Negative exponents are
// handled by computing the positive power and taking the reciprocal. The
// exponent is split into an integer part (binary exponentiation) and a
// fractional part (computed as e**(frac*log x)).
func floatPower(c Context, bx, bexp BigFloat) *big.Float {
	x := bx.Float
	fexp := newFloat(c).Set(bexp.Float)
	positive := true
	conf := c.Config()
	switch fexp.Sign() {
	case 0:
		// x**0 == 1 for any x.
		return newFloat(c).SetInt64(1)
	case -1:
		if x.Sign() == 0 {
			Errorf("negative exponent of zero")
		}
		positive = false
		fexp = c.EvalUnary("-", bexp).toType("**", conf, bigFloatType).(BigFloat).Float
	}
	// Easy cases.
	switch {
	case x.Cmp(floatOne) == 0, x.Sign() == 0:
		return x
	case fexp.Cmp(floatHalf) == 0:
		// x**0.5 is a square root; invert for negative exponents.
		z := floatSqrt(c, x)
		if !positive {
			z = z.Quo(floatOne, z)
		}
		return z
	}
	isInt := true
	exp, acc := fexp.Int64() // No point in doing *big.Ints now. TODO?
	if acc != big.Exact {
		isInt = false
	}
	// Integer part.
	z := integerPower(c, x, exp)
	// Fractional part.
	if !isInt {
		frac := fexp.Sub(fexp, newFloat(c).SetInt64(exp))
		// x**frac is e**(frac*log x)
		logx := floatLog(c, x)
		frac.Mul(frac, logx)
		z.Mul(z, exponential(c.Config(), frac))
	}
	if !positive {
		z.Quo(floatOne, z)
	}
	return z
}
// exponential computes exp(x) using the Taylor series. It converges quickly
// since we call it with only small values of x.
func exponential(conf *config.Config, x *big.Float) *big.Float {
	// The Taylor series for e**x, exp(x), is 1 + x + x²/2! + x³/3! ...
	xN := newF(conf).Set(x)
	term := newF(conf)
	n := newF(conf)
	nFactorial := newF(conf).SetUint64(1)
	z := newF(conf).SetInt64(1)
	for loop := newLoop(conf, "exponential", x, 10); ; { // Big exponentials converge slowly.
		// Next term is xⁿ/n!.
		term.Set(xN)
		term.Quo(term, nFactorial)
		z.Add(z, term)
		if loop.done(z) {
			break
		}
		// Advance x**index (multiply by x).
		xN.Mul(xN, x)
		// Advance n, n!.
		nFactorial.Mul(nFactorial, n.SetUint64(loop.i+1))
	}
	return z
}
// integerPower returns x**exp where exp is an int64 of size <= intBits.
func integerPower(c Context, x *big.Float, exp int64) *big.Float {
z := newFloat(c).SetInt64(1)
y := newFloat(c).Set(x)
// For each loop, we compute a xⁿ where n is a power of two.
for exp > 0 {
if exp&1 == 1 {
// This bit contributes. Multiply it into the result.
z.Mul(z, y)
}
y.Mul(y, y)
exp >>= 1
}
return z
} | value/power.go | 0.651798 | 0.560433 | power.go | starcoder |
package block
import (
"github.com/df-mc/dragonfly/dragonfly/block/fire"
"github.com/df-mc/dragonfly/dragonfly/item"
"github.com/df-mc/dragonfly/dragonfly/world"
"github.com/go-gl/mathgl/mgl64"
)
// Torch are non-solid blocks that emit light.
type Torch struct {
	noNBT
	transparent
	empty
	// Facing is the direction from the torch to the block.
	Facing world.Face
	// Type is the type of fire lighting the torch.
	Type fire.Fire
}
// LightEmissionLevel returns the light level emitted by the torch: 14 for a
// normal torch, otherwise the fire type's own light level.
func (t Torch) LightEmissionLevel() uint8 {
	if t.Type == fire.Normal() {
		return 14
	}
	return t.Type.LightLevel
}
// UseOnBlock handles torch placement: it refuses the bottom face and liquid
// positions, and when the clicked face is not solid it searches neighbouring
// faces for solid support before placing.
func (t Torch) UseOnBlock(pos world.BlockPos, face world.Face, clickPos mgl64.Vec3, w *world.World, user item.User, ctx *item.UseContext) bool {
	pos, face, used := firstReplaceable(w, pos, face, t)
	if !used {
		return false
	}
	// Torches cannot hang from the underside of a block.
	if face == world.FaceDown {
		return false
	}
	// Torches cannot be placed inside liquids.
	if _, ok := w.Block(pos).(world.Liquid); ok {
		return false
	}
	if !w.Block(pos.Side(face.Opposite())).Model().FaceSolid(pos.Side(face.Opposite()), face, w) {
		// The clicked face is not solid: fall back to any neighbouring face
		// that can support the torch.
		found := false
		for _, i := range []world.Face{world.FaceSouth, world.FaceWest, world.FaceNorth, world.FaceEast, world.FaceDown} {
			if w.Block(pos.Side(i)).Model().FaceSolid(pos.Side(i), i.Opposite(), w) {
				found = true
				face = i.Opposite()
				break
			}
		}
		if !found {
			return false
		}
	}
	t.Facing = face.Opposite()
	place(w, pos, t, user, ctx)
	return placed(ctx)
}
// NeighbourUpdateTick breaks the torch (without particles) when the block
// face it is attached to is no longer solid.
func (t Torch) NeighbourUpdateTick(pos, _ world.BlockPos, w *world.World) {
	if !w.Block(pos.Side(t.Facing)).Model().FaceSolid(pos.Side(t.Facing), t.Facing.Opposite(), w) {
		w.BreakBlockWithoutParticles(pos)
	}
}
// HasLiquidDrops always returns true for torches.
func (t Torch) HasLiquidDrops() bool {
	return true
}
// EncodeItem returns the item ID and metadata value of the torch, which
// depend on the type of fire lighting it. It panics for unknown fire types.
func (t Torch) EncodeItem() (id int32, meta int16) {
	if t.Type == fire.Normal() {
		return 50, 0
	}
	if t.Type == fire.Soul() {
		return -268, 0
	}
	panic("invalid fire type")
}
// EncodeBlock returns the block name and state properties of the torch,
// mapping the facing direction to its string form ("unknown" when the face
// has no mapping). It panics for unknown fire types.
func (t Torch) EncodeBlock() (name string, properties map[string]interface{}) {
	directions := map[world.Face]string{
		world.FaceDown:  "top",
		world.FaceNorth: "north",
		world.FaceEast:  "east",
		world.FaceSouth: "south",
		world.FaceWest:  "west",
	}
	facing, ok := directions[t.Facing]
	if !ok {
		facing = "unknown"
	}
	props := map[string]interface{}{"torch_facing_direction": facing}
	if t.Type == fire.Normal() {
		return "minecraft:torch", props
	}
	if t.Type == fire.Soul() {
		return "minecraft:soul_torch", props
	}
	panic("invalid fire type")
}
// Hash returns a unique hash of the torch's state, combining the base torch
// hash with the facing direction (bits 32+) and fire type (bits 35+).
func (t Torch) Hash() uint64 {
	return hashTorch | (uint64(t.Facing) << 32) | (uint64(t.Type.Uint8()) << 35)
}
// allTorch ...
func allTorch() (torch []canEncode) {
for i := world.Face(0); i < 6; i++ {
torch = append(torch, Torch{Type: fire.Normal(), Facing: i})
torch = append(torch, Torch{Type: fire.Soul(), Facing: i})
}
return
} | dragonfly/block/torch.go | 0.678753 | 0.40204 | torch.go | starcoder |
package input
import (
"context"
"errors"
"fmt"
"io"
"sync"
"time"
"github.com/Jeffail/benthos/v3/internal/codec"
"github.com/Jeffail/benthos/v3/internal/docs"
"github.com/Jeffail/benthos/v3/internal/interop"
sftpSetup "github.com/Jeffail/benthos/v3/internal/service/sftp"
"github.com/Jeffail/benthos/v3/lib/input/reader"
"github.com/Jeffail/benthos/v3/lib/log"
"github.com/Jeffail/benthos/v3/lib/message"
"github.com/Jeffail/benthos/v3/lib/metrics"
"github.com/Jeffail/benthos/v3/lib/types"
"github.com/pkg/sftp"
)
// init registers the sftp input constructor and its documentation/config
// spec with the plugin registry.
func init() {
	watcherDocs := docs.FieldSpecs{
		docs.FieldCommon(
			"enabled",
			"Whether file watching is enabled.",
		),
		docs.FieldCommon(
			"minimum_age",
			"The minimum period of time since a file was last updated before attempting to consume it. Increasing this period decreases the likelihood that a file will be consumed whilst it is still being written to.",
			"10s", "1m", "10m",
		),
		docs.FieldCommon(
			"poll_interval",
			"The interval between each attempt to scan the target paths for new files.",
			"100ms", "1s",
		),
		docs.FieldCommon(
			"cache",
			"A [cache resource](/docs/components/caches/about) for storing the paths of files already consumed.",
		),
	}
	Constructors[TypeSFTP] = TypeSpec{
		constructor: fromSimpleConstructor(func(conf Config, mgr types.Manager, log log.Modular, stats metrics.Type) (Type, error) {
			r, err := newSFTPReader(conf.SFTP, mgr, log, stats)
			if err != nil {
				return nil, err
			}
			// Wrapped in an AsyncPreserver so unacknowledged messages are
			// replayed after reconnects.
			return NewAsyncReader(
				TypeSFTP,
				true,
				reader.NewAsyncPreserver(r),
				log, stats,
			)
		}),
		Status:  docs.StatusExperimental,
		Version: "3.39.0",
		Summary: `Consumes files from a server over SFTP.`,
		Description: `
## Metadata
This input adds the following metadata fields to each message:
` + "```" + `
- sftp_path
` + "```" + `
You can access these metadata fields using [function interpolation](/docs/configuration/interpolation#metadata).`,
		FieldSpecs: docs.FieldSpecs{
			docs.FieldCommon(
				"address",
				"The address of the server to connect to that has the target files.",
			),
			docs.FieldCommon(
				"credentials",
				"The credentials to use to log into the server.",
			).WithChildren(sftpSetup.CredentialsDocs()...),
			docs.FieldCommon(
				"paths",
				"A list of paths to consume sequentially. Glob patterns are supported.",
			).Array(),
			codec.ReaderDocs,
			docs.FieldAdvanced("delete_on_finish", "Whether to delete files from the server once they are processed."),
			docs.FieldAdvanced("max_buffer", "The largest token size expected when consuming delimited files."),
			docs.FieldCommon(
				"watcher",
				"An experimental mode whereby the input will periodically scan the target paths for new files and consume them, when all files are consumed the input will continue polling for new files.",
			).WithChildren(watcherDocs...).AtVersion("3.42.0"),
		},
		Categories: []Category{
			CategoryNetwork,
		},
	}
}
//------------------------------------------------------------------------------
// watcherConfig contains configuration fields for the optional watcher mode,
// which polls the target paths for new files.
type watcherConfig struct {
	Enabled      bool   `json:"enabled" yaml:"enabled"`
	MinimumAge   string `json:"minimum_age" yaml:"minimum_age"`
	PollInterval string `json:"poll_interval" yaml:"poll_interval"`
	Cache        string `json:"cache" yaml:"cache"`
}
// SFTPConfig contains configuration fields for the SFTP input type.
type SFTPConfig struct {
	Address        string                `json:"address" yaml:"address"`
	Credentials    sftpSetup.Credentials `json:"credentials" yaml:"credentials"`
	Paths          []string              `json:"paths" yaml:"paths"`
	Codec          string                `json:"codec" yaml:"codec"`
	DeleteOnFinish bool                  `json:"delete_on_finish" yaml:"delete_on_finish"`
	MaxBuffer      int                   `json:"max_buffer" yaml:"max_buffer"`
	Watcher        watcherConfig         `json:"watcher" yaml:"watcher"`
}
// NewSFTPConfig creates a new SFTPConfig with default values.
func NewSFTPConfig() SFTPConfig {
	return SFTPConfig{
		Address:        "",
		Credentials:    sftpSetup.Credentials{},
		Paths:          []string{},
		Codec:          "all-bytes",
		DeleteOnFinish: false,
		MaxBuffer:      1000000,
		Watcher: watcherConfig{
			Enabled:      false,
			MinimumAge:   "1s",
			PollInterval: "1s",
			Cache:        "",
		},
	}
}
//------------------------------------------------------------------------------
// sftpReader consumes files from an SFTP server sequentially, decoding each
// through the configured codec.
type sftpReader struct {
	conf SFTPConfig

	log   log.Modular
	stats metrics.Type
	mgr   types.Manager

	client *sftp.Client

	// paths is the remaining queue of files to consume.
	paths       []string
	scannerCtor codec.ReaderConstructor

	// scannerMut guards scanner, client, paths and currentPath.
	scannerMut  sync.Mutex
	scanner     codec.Reader
	currentPath string

	// Parsed watcher settings (zero when watcher mode is disabled).
	watcherPollInterval time.Duration
	watcherMinAge       time.Duration
}
// newSFTPReader validates the configuration (codec, watcher durations and
// cache resource) and constructs an sftpReader. The SFTP connection itself
// is deferred to ConnectWithContext.
func newSFTPReader(conf SFTPConfig, mgr types.Manager, log log.Modular, stats metrics.Type) (*sftpReader, error) {
	codecConf := codec.NewReaderConfig()
	codecConf.MaxScanTokenSize = conf.MaxBuffer
	ctor, err := codec.GetReader(conf.Codec, codecConf)
	if err != nil {
		return nil, err
	}

	var watcherPollInterval, watcherMinAge time.Duration
	if conf.Watcher.Enabled {
		if watcherPollInterval, err = time.ParseDuration(conf.Watcher.PollInterval); err != nil {
			return nil, fmt.Errorf("failed to parse watcher poll interval: %w", err)
		}

		if watcherMinAge, err = time.ParseDuration(conf.Watcher.MinimumAge); err != nil {
			return nil, fmt.Errorf("failed to parse watcher minimum age: %w", err)
		}

		if conf.Watcher.Cache == "" {
			return nil, errors.New("a cache must be specified when watcher mode is enabled")
		}

		// Fail fast if the configured cache resource does not exist.
		if err := interop.ProbeCache(context.Background(), mgr, conf.Watcher.Cache); err != nil {
			return nil, err
		}
	}

	s := &sftpReader{
		conf:                conf,
		log:                 log,
		stats:               stats,
		mgr:                 mgr,
		scannerCtor:         ctor,
		watcherPollInterval: watcherPollInterval,
		watcherMinAge:       watcherMinAge,
	}

	return s, err
}
// ConnectWithContext attempts to establish a connection to the target SFTP server.
// It also opens the next queued file and wires it through the codec; when the
// queue is empty it either terminates (normal mode) or polls for new files
// (watcher mode).
func (s *sftpReader) ConnectWithContext(ctx context.Context) error {
	var err error

	s.scannerMut.Lock()
	defer s.scannerMut.Unlock()

	// A live scanner means we are already connected and mid-file.
	if s.scanner != nil {
		return nil
	}

	if s.client == nil {
		if s.client, err = s.conf.Credentials.GetClient(s.conf.Address); err != nil {
			return err
		}
		s.paths, err = s.getFilePaths()
		if err != nil {
			return err
		}
	}

	if len(s.paths) == 0 {
		if !s.conf.Watcher.Enabled {
			// All files consumed: shut the input down.
			s.client.Close()
			s.client = nil
			return types.ErrTypeClosed
		}
		// Watcher mode: wait out the poll interval then rescan.
		select {
		case <-time.After(s.watcherPollInterval):
		case <-ctx.Done():
			return ctx.Err()
		}
		s.paths, err = s.getFilePaths()
		return err
	}

	nextPath := s.paths[0]

	file, err := s.client.Open(nextPath)
	if err != nil {
		return err
	}

	// The codec ack hook optionally deletes the file once fully consumed.
	if s.scanner, err = s.scannerCtor(nextPath, file, func(ctx context.Context, err error) error {
		if err == nil && s.conf.DeleteOnFinish {
			return s.client.Remove(nextPath)
		}
		return nil
	}); err != nil {
		file.Close()
		return err
	}

	s.currentPath = nextPath
	s.paths = s.paths[1:]

	s.log.Infof("Consuming from file '%v'\n", nextPath)
	return err
}
// ReadWithContext attempts to read a new message from the target file(s) on
// the server. When the current file is exhausted or fails, watcher mode marks
// its path as consumed in the cache and the scanner is torn down so the next
// connect picks up the next file.
func (s *sftpReader) ReadWithContext(ctx context.Context) (types.Message, reader.AsyncAckFn, error) {
	s.scannerMut.Lock()
	defer s.scannerMut.Unlock()

	if s.scanner == nil || s.client == nil {
		return nil, nil, types.ErrNotConnected
	}

	parts, codecAckFn, err := s.scanner.Next(ctx)
	if err != nil {
		if errors.Is(err, context.Canceled) ||
			errors.Is(err, context.DeadlineExceeded) {
			err = types.ErrTimeout
		}
		if err != types.ErrTimeout {
			if s.conf.Watcher.Enabled {
				var setErr error
				if cerr := interop.AccessCache(ctx, s.mgr, s.conf.Watcher.Cache, func(cache types.Cache) {
					setErr = cache.Set(s.currentPath, []byte("@"))
				}); cerr != nil {
					return nil, nil, fmt.Errorf("failed to get the cache for sftp watcher mode: %v", cerr)
				}
				if setErr != nil {
					// Fixed: this previously formatted the outer scanner error
					// (err) instead of the cache write error (setErr).
					return nil, nil, fmt.Errorf("failed to update path in cache %s: %v", s.currentPath, setErr)
				}
			}
			s.scanner.Close(ctx)
			s.scanner = nil
		}
		if errors.Is(err, io.EOF) {
			// End of file is not fatal; report a timeout so the caller
			// reconnects and moves on to the next file.
			err = types.ErrTimeout
		}
		return nil, nil, err
	}

	for _, part := range parts {
		part.Metadata().Set("sftp_path", s.currentPath)
	}

	msg := message.New(nil)
	msg.Append(parts...)
	return msg, func(ctx context.Context, res types.Response) error {
		return codecAckFn(ctx, res.Error())
	}, nil
}
// CloseAsync begins cleaning up resources used by this reader asynchronously.
func (s *sftpReader) CloseAsync() {
	go func() {
		s.scannerMut.Lock()
		// Close the active scanner first so the codec can flush, then drop
		// the remaining path queue and the SFTP connection.
		if s.scanner != nil {
			s.scanner.Close(context.Background())
			s.scanner = nil
			s.paths = nil
		}
		if s.client != nil {
			s.client.Close()
			s.client = nil
		}
		s.scannerMut.Unlock()
	}()
}
// WaitForClose will block until either the reader is closed or a specified
// timeout occurs.
func (s *sftpReader) WaitForClose(timeout time.Duration) error {
	// All cleanup happens in CloseAsync's goroutine; there is nothing to
	// wait on here, so this returns immediately regardless of timeout.
	return nil
}
func (s *sftpReader) getFilePaths() ([]string, error) {
var filepaths []string
if !s.conf.Watcher.Enabled {
for _, p := range s.conf.Paths {
paths, err := s.client.Glob(p)
if err != nil {
s.log.Warnf("Failed to scan files from path %v: %v\n", p, err)
continue
}
filepaths = append(filepaths, paths...)
}
return filepaths, nil
}
if cerr := interop.AccessCache(context.Background(), s.mgr, s.conf.Watcher.Cache, func(cache types.Cache) {
for _, p := range s.conf.Paths {
paths, err := s.client.Glob(p)
if err != nil {
s.log.Warnf("Failed to scan files from path %v: %v\n", p, err)
continue
}
for _, path := range paths {
info, err := s.client.Stat(path)
if err != nil {
s.log.Warnf("Failed to stat path %v: %v\n", path, err)
continue
}
if time.Since(info.ModTime()) < s.watcherMinAge {
continue
}
if _, err := cache.Get(path); err != nil {
filepaths = append(filepaths, path)
} else if err = cache.Set(path, []byte("@")); err != nil { // Reset the TTL for the path
s.log.Warnf("Failed to set key in cache for path %v: %v\n", path, err)
}
}
}
}); cerr != nil {
return nil, fmt.Errorf("error getting cache in getFilePaths: %v", cerr)
}
return filepaths, nil
} | lib/input/sftp.go | 0.65368 | 0.490602 | sftp.go | starcoder |
package relpos
import (
"github.com/goki/gi/mat32"
"github.com/goki/ki/kit"
)
// Rel defines a position relationship among layers, in terms of X,Y width and height of layer
// and associated position within a given X-Y plane,
// and Z vertical stacking of layers above and below each other.
//
// NOTE(review): the desc tags on XOffset and YOffset appear swapped (XOffset
// is described as vertical/y-axis, YOffset as horizontal/x-axis) — confirm
// against AlignXPos/AlignYPos usage before changing the tag strings.
type Rel struct {
	Rel     Relations `desc:"spatial relationship between this layer and the other layer"`
	XAlign  XAligns   `viewif:"Rel=FrontOf,Behind,Above,Below" desc:"horizontal (x-axis) alignment relative to other"`
	YAlign  YAligns   `viewif:"Rel=LeftOf,RightOf,Above,Below" desc:"vertical (y-axis) alignment relative to other"`
	Other   string    `desc:"name of the other layer we are in relationship to"`
	Scale   float32   `desc:"scaling factor applied to layer size for displaying"`
	Space   float32   `desc:"number of unit-spaces between us"`
	XOffset float32   `desc:"for vertical (y-axis) alignment, amount we are offset relative to perfect alignment"`
	YOffset float32   `desc:"for horizontial (x-axis) alignment, amount we are offset relative to perfect alignment"`
}
// Defaults fills in fallback values for Scale (1) and Space (5) -- Rel and the
// alignment fields must still be set explicitly. It is applied automatically
// whenever Scale == 0.
func (rp *Rel) Defaults() {
	if rp.Space == 0 {
		rp.Space = 5
	}
	if rp.Scale == 0 {
		rp.Scale = 1
	}
}
// Pos returns the relative position compared to other position and size, based
// on settings. op is the other layer's position, osz its X-Y size, and sz our
// X-Y size; osz and sz must both have already been scaled by the relevant
// Scale factor. Unknown relations return op unchanged.
func (rp *Rel) Pos(op mat32.Vec3, osz mat32.Vec2, sz mat32.Vec2) mat32.Vec3 {
	if rp.Scale == 0 {
		rp.Defaults()
	}
	rs := op
	switch rp.Rel {
	case NoRel:
		return op
	case RightOf:
		rs.X = op.X + osz.X + rp.Space
		rs.Y = rp.AlignYPos(op.Y, osz.Y, sz.Y)
	case LeftOf:
		rs.X = op.X - sz.X - rp.Space
		rs.Y = rp.AlignYPos(op.Y, osz.Y, sz.Y)
	case Behind:
		rs.Y = op.Y + osz.Y + rp.Space
		rs.X = rp.AlignXPos(op.X, osz.X, sz.X)
	case FrontOf:
		rs.Y = op.Y - sz.Y - rp.Space
		rs.X = rp.AlignXPos(op.X, osz.X, sz.X)
	case Above:
		rs.Z += 1
		rs.X = rp.AlignXPos(op.X, osz.X, sz.X)
		rs.Y = rp.AlignYPos(op.Y, osz.Y, sz.Y)
	case Below:
		// BUG FIX: Below previously incremented Z exactly like Above, making
		// the two relationships indistinguishable; stacking below must
		// decrement the Z level.
		rs.Z -= 1
		rs.X = rp.AlignXPos(op.X, osz.X, sz.X)
		rs.Y = rp.AlignYPos(op.Y, osz.Y, sz.Y)
	}
	return rs
}
// AlignYPos returns the Y-axis (within-plane vertical or height) position according to alignment factors.
// yop is the other layer's Y position, yosz its Y size, and ysz our Y size;
// YOffset is added on top of the chosen alignment. Unrecognized YAlign values
// fall through and return yop unchanged.
func (rp *Rel) AlignYPos(yop, yosz, ysz float32) float32 {
	switch rp.YAlign {
	case Front:
		return yop + rp.YOffset
	case Center:
		return yop + 0.5*yosz - 0.5*ysz + rp.YOffset
	case Back:
		return yop + yosz - ysz + rp.YOffset
	}
	return yop
}
// AlignXPos returns the X-axis (within-plane horizontal or width) position according to alignment factors.
// xop is the other layer's X position, xosz its X size, and xsz our X size;
// XOffset is added on top of the chosen alignment. Unrecognized XAlign values
// fall through and return xop unchanged.
func (rp *Rel) AlignXPos(xop, xosz, xsz float32) float32 {
	switch rp.XAlign {
	case Left:
		return xop + rp.XOffset
	case Middle:
		return xop + 0.5*xosz - 0.5*xsz + rp.XOffset
	case Right:
		return xop + xosz - xsz + rp.XOffset
	}
	return xop
}
// Relations are different spatial relationships (of layers)
type Relations int

//go:generate stringer -type=Relations

// KiT_Relations registers the Relations enum with the kit type system.
var KiT_Relations = kit.Enums.AddEnum(RelationsN, kit.NotBitFlag, nil)

// MarshalJSON / UnmarshalJSON serialize the enum by name via the kit helpers.
func (ev Relations) MarshalJSON() ([]byte, error)  { return kit.EnumMarshalJSON(ev) }
func (ev *Relations) UnmarshalJSON(b []byte) error { return kit.EnumUnmarshalJSON(ev, b) }

// The relations
const (
	NoRel   Relations = iota // no positioning relative to another layer
	RightOf                  // to the right of the other layer along X
	LeftOf                   // to the left of the other layer along X
	Behind                   // behind the other layer along Y
	FrontOf                  // in front of the other layer along Y
	Above                    // stacked above the other layer along Z
	Below                    // stacked below the other layer along Z
	RelationsN               // number of Relations values; must remain last
)
// XAligns are different horizontal alignments
type XAligns int

//go:generate stringer -type=XAligns

// KiT_XAligns registers the XAligns enum with the kit type system.
var KiT_XAligns = kit.Enums.AddEnum(XAlignsN, kit.NotBitFlag, nil)

func (ev XAligns) MarshalJSON() ([]byte, error)  { return kit.EnumMarshalJSON(ev) }
func (ev *XAligns) UnmarshalJSON(b []byte) error { return kit.EnumUnmarshalJSON(ev, b) }

// Horizontal alignment modes consumed by AlignXPos.
const (
	Left     XAligns = iota // align left edges
	Middle                  // align centers
	Right                   // align right edges
	XAlignsN                // number of XAligns values; must remain last
)
// YAligns are different vertical alignments
type YAligns int

//go:generate stringer -type=YAligns

// KiT_YAligns registers the YAligns enum with the kit type system.
var KiT_YAligns = kit.Enums.AddEnum(YAlignsN, kit.NotBitFlag, nil)

func (ev YAligns) MarshalJSON() ([]byte, error)  { return kit.EnumMarshalJSON(ev) }
func (ev *YAligns) UnmarshalJSON(b []byte) error { return kit.EnumUnmarshalJSON(ev, b) }

// Vertical alignment modes consumed by AlignYPos.
const (
	Front    YAligns = iota // align front edges
	Center                  // align centers
	Back                    // align back edges
	YAlignsN                // number of YAligns values; must remain last
)
package bitstring
/* unsigned integer get */
// Uint8 interprets the 8 bits at offset off as an uint8 in big endian and
// returns its value. Behavior is undefined if there aren't enough bits.
func (bs *Bitstring) Uint8(off int) uint8 {
	bs.mustExist(off + 7)
	// Truncating the 64-bit read to uint8 keeps exactly the low 8 bits.
	return uint8(bs.uint(uint64(off), 7))
}
// Uint16 interprets the 16 bits at offset off as an uint16 in big endian and
// returns its value. Behavior is undefined if there aren't enough bits.
func (bs *Bitstring) Uint16(off int) uint16 {
	bs.mustExist(off + 15)
	// Truncating the 64-bit read to uint16 keeps exactly the low 16 bits.
	return uint16(bs.uint(uint64(off), 15))
}
// Uint32 interprets the 32 bits at offset off as an uint32 in big endian and
// returns its value. Behavior is undefined if there aren't enough bits.
func (bs *Bitstring) Uint32(off int) uint32 {
	bs.mustExist(off + 31)
	// Truncating the 64-bit read to uint32 keeps exactly the low 32 bits.
	return uint32(bs.uint(uint64(off), 31))
}
// Uint64 interprets the 64 bits at offset off as an uint64 in big endian and
// returns its value. Behavior is undefined if there aren't enough bits.
func (bs *Bitstring) Uint64(off int) uint64 {
	bs.mustExist(off + 63)
	if off&((1<<6)-1) == 0 {
		// Fast path: off is a multiple of 64.
		return uint64(bs.data[off>>6])
	}
	// Slow path: the value straddles two words; stitch the high bits of the
	// low word together with the low bits of the following word.
	i := uint64(off)
	w := wordoffset(i)
	bit := bitoffset(i)
	loword := bs.data[w] >> bit
	hiword := bs.data[w+1] & ((1 << bit) - 1)
	return uint64(loword | hiword<<(64-bit))
}
// Uintn interprets the n bits at offset off as an n-bit unsigned integer in big
// endian and returns its value. Behavior is undefined if there aren't enough
// bits. Panics if nbits is greater than 64.
func (bs *Bitstring) Uintn(off, n int) uint64 {
	if n > 64 || n < 1 {
		panic("Uintn supports unsigned integers from 1 to 64 bits long")
	}
	bs.mustExist(off + n - 1)
	i, nbits := uint64(off), uint64(n)
	j := wordoffset(i)             // word holding the first bit
	k := wordoffset(i + nbits - 1) // word holding the last bit
	looff := bitoffset(i)
	loword := bs.data[j]
	if j == k {
		// Fast path: value doesn't cross uint64 boundaries.
		return (loword >> looff) & lomask(nbits)
	}
	// The value spans two words: combine the top of the low word with the
	// bottom of the high word.
	hioff := bitoffset(i + nbits)
	hiword := bs.data[k] & lomask(hioff)
	loword = himask(looff) & loword >> looff
	return loword | hiword<<(64-looff)
}
// uint reads an unsigned value starting at bit off, where n is the offset of
// the value's last bit relative to off (i.e. width-1). The caller is
// responsible for truncating the result to the desired width.
func (bs *Bitstring) uint(off, n uint64) uint64 {
	bit := bitoffset(off)
	loword := bs.data[wordoffset(off)] >> bit
	hiword := bs.data[wordoffset(off+n)] & ((1 << bit) - 1)
	return loword | hiword<<(64-bit)
}
/* unsigned integer set */

// SetUint8 sets the 8 bits at offset off to the given uint8 value, in big
// endian. Behavior is undefined if there aren't enough bits.
func (bs *Bitstring) SetUint8(off int, val uint8) {
	bs.mustExist(off + 7)
	i := uint64(off)
	lobit := bitoffset(i)
	j := wordoffset(i)
	k := wordoffset(i + 7)
	if j == k {
		// Fast path: value doesn't cross uint64 boundaries.
		// (A redundant shadowed re-computation of lobit was removed here;
		// the value is identical to the one computed above.)
		neww := uint64(val) << lobit
		msk := mask(lobit, lobit+8)
		bs.data[j] = transferbits(bs.data[j], neww, msk)
		return
	}
	// Transfer bits to low word.
	bs.data[j] = transferbits(bs.data[j], uint64(val)<<lobit, himask(lobit))
	// Transfer bits to high word.
	lon := 64 - lobit
	bs.data[k] = transferbits(bs.data[k], uint64(val)>>lon, lomask(8-lon))
}
// SetUint16 sets the 16 bits at offset off to the given uint16 value, in big
// endian. Behavior is undefined if there aren't enough bits.
func (bs *Bitstring) SetUint16(off int, val uint16) {
	bs.mustExist(off + 15)
	i := uint64(off)
	lobit := bitoffset(i)
	j := wordoffset(i)
	k := wordoffset(i + 15)
	if j == k {
		// Fast path: value doesn't cross uint64 boundaries.
		neww := uint64(val) << lobit
		msk := mask(lobit, lobit+16)
		bs.data[j] = transferbits(bs.data[j], neww, msk)
		return
	}
	// Transfer bits to low word.
	bs.data[j] = transferbits(bs.data[j], uint64(val)<<lobit, himask(lobit))
	// Transfer bits to high word.
	lon := 64 - lobit
	bs.data[k] = transferbits(bs.data[k], uint64(val)>>lon, lomask(16-lon))
}
// SetUint32 sets the 32 bits at offset off to the given uint32 value, in big
// endian. Behavior is undefined if there aren't enough bits.
func (bs *Bitstring) SetUint32(off int, val uint32) {
	bs.mustExist(off + 31)
	i := uint64(off)
	lobit := bitoffset(i)
	j := wordoffset(i)
	k := wordoffset(i + 31)
	if j == k {
		// Fast path: value doesn't cross uint64 boundaries.
		neww := uint64(val) << lobit
		msk := mask(lobit, lobit+32)
		bs.data[j] = transferbits(bs.data[j], neww, msk)
		return
	}
	// Transfer bits to low word.
	bs.data[j] = transferbits(bs.data[j], uint64(val)<<lobit, himask(lobit))
	// Transfer bits to high word.
	lon := 64 - lobit
	bs.data[k] = transferbits(bs.data[k], uint64(val)>>lon, lomask(32-lon))
}
// SetUint64 sets the 64 bits at offset off to the given uint64 value, in big
// endian. Behavior is undefined if there aren't enough bits.
func (bs *Bitstring) SetUint64(off int, val uint64) {
	bs.mustExist(off + 63)
	i := uint64(off)
	lobit := bitoffset(i)
	j := wordoffset(i)
	if off&((1<<6)-1) == 0 {
		// Fast path: off is a multiple of 64, so val replaces a whole word.
		bs.data[off>>6] = val
		return
	}
	// Transfer bits to low word.
	bs.data[j] = transferbits(bs.data[j], uint64(val)<<lobit, himask(lobit))
	// Transfer bits to high word.
	lon := (64 - lobit)
	k := wordoffset(i + 63)
	bs.data[k] = transferbits(bs.data[k], uint64(val)>>lon, lomask(64-lon))
}
// SetUintn sets the n bits at offset off with the given n-bit unsigned integer in
// big endian. Behavior is undefined if there aren't enough bits. Panics if
// nbits is greater than 64.
func (bs *Bitstring) SetUintn(off, n int, val uint64) {
	if n > 64 || n < 1 {
		panic("SetUintn supports unsigned integers from 1 to 64 bits long")
	}
	bs.mustExist(off + n - 1)
	i, nbits := uint64(off), uint64(n)
	lobit := bitoffset(i)
	j := wordoffset(i)             // word holding the first bit
	k := wordoffset(i + nbits - 1) // word holding the last bit
	if j == k {
		// Fast path: value doesn't cross uint64 boundaries.
		x := (val & lomask(nbits)) << lobit
		bs.data[j] = transferbits(bs.data[j], x, mask(lobit, lobit+nbits))
		return
	}
	// First and last bits are on different words.
	// Transfer bits to low word.
	lon := 64 - lobit // how many bits of n we transfer to loword
	bs.data[j] = transferbits(bs.data[j], val<<lobit, himask(lon))
	// Transfer bits to high word.
	bs.data[k] = transferbits(bs.data[k], val>>lon, lomask(nbits-lon))
}
/* signed get */

// Int8 interprets the 8 bits at offset off as an int8 in big endian and
// returns its value. Behavior is undefined if there aren't enough bits.
func (bs *Bitstring) Int8(off int) int8 { return int8(bs.Uint8(off)) }

// Int16 interprets the 16 bits at offset off as an int16 in big endian and
// returns its value. Behavior is undefined if there aren't enough bits.
func (bs *Bitstring) Int16(off int) int16 { return int16(bs.Uint16(off)) }

// Int32 interprets the 32 bits at offset off as an int32 in big endian and
// returns its value. Behavior is undefined if there aren't enough bits.
func (bs *Bitstring) Int32(off int) int32 { return int32(bs.Uint32(off)) }

// Int64 interprets the 64 bits at offset off as an int64 in big endian and
// returns its value. Behavior is undefined if there aren't enough bits.
func (bs *Bitstring) Int64(off int) int64 { return int64(bs.Uint64(off)) }

// Intn interprets the n bits at offset off as an n-bit signed integer in big
// endian and returns its value. Behavior is undefined if there aren't enough
// bits. Panics if nbits is greater than 64.
//
// NOTE(review): this simply reinterprets the zero-extended unsigned value, so
// for n < 64 the sign bit of the n-bit field is NOT propagated into the upper
// bits of the int64 — confirm that callers expect this.
func (bs *Bitstring) Intn(off, n int) int64 { return int64(bs.Uintn(off, n)) }

/* signed integer set */

// SetInt8 sets the 8 bits at offset off with the given int8 value, in big
// endian. Behavior is undefined if there aren't enough bits.
func (bs *Bitstring) SetInt8(off int, val int8) { bs.SetUint8(off, uint8(val)) }

// SetInt16 sets the 16 bits at offset off with the given int16 value, in big
// endian. Behavior is undefined if there aren't enough bits.
func (bs *Bitstring) SetInt16(off int, val int16) { bs.SetUint16(off, uint16(val)) }

// SetInt32 sets the 32 bits at offset off with the given int32 value, in big
// endian. Behavior is undefined if there aren't enough bits.
func (bs *Bitstring) SetInt32(off int, val int32) { bs.SetUint32(off, uint32(val)) }

// SetInt64 sets the 64 bits at offset off with the given int64 value, in big
// endian. Behavior is undefined if there aren't enough bits.
func (bs *Bitstring) SetInt64(off int, val int64) { bs.SetUint64(off, uint64(val)) }

// SetIntn sets the n bits at offset off with the given n-bit signed integer in
// big endian. Behavior is undefined if there aren't enough bits. Panics if
// nbits is greater than 64.
func (bs *Bitstring) SetIntn(off, n int, val int64) { bs.SetUintn(off, n, uint64(val)) }
package vote
import (
"context"
)
// OpinionGiver gives opinions about the given IDs.
type OpinionGiver interface {
	// Query queries the OpinionGiver for its opinions on the given IDs.
	// The passed in context can be used to signal cancellation of the query.
	Query(ctx context.Context, ids []string) (Opinions, error)

	// ID returns the ID of the opinion giver.
	ID() string
}
// QueriedOpinions represents queried opinions from a given opinion giver.
type QueriedOpinions struct {
	// OpinionGiverID is the ID of the opinion giver.
	OpinionGiverID string `json:"opinion_giver_id"`

	// Opinions maps each queried ID to the opinion given for it.
	Opinions map[string]Opinion `json:"opinions"`

	// TimesCounted is the amount of times the opinion giver's opinion has
	// counted. Usually this number is 1 but due to randomization of the
	// queried opinion givers, the same opinion giver's opinions might be
	// taken into account multiple times.
	TimesCounted int `json:"times_counted"`
}

// OpinionGiverFunc is a function which gives a slice of OpinionGivers or an error.
type OpinionGiverFunc func() ([]OpinionGiver, error)
// Opinions is a slice of Opinion.
type Opinions []Opinion

// Opinion is an opinion about a given thing.
type Opinion byte

// The possible opinions. Each value occupies a distinct bit so that opinions
// can be combined into masks.
const (
	Like    Opinion = 1 << 0
	Dislike Opinion = 1 << 1
	Unknown Opinion = 1 << 2
)

// String returns a human readable representation of the Opinion.
func (o Opinion) String() string {
	switch o {
	case Like:
		return "Like"
	case Dislike:
		return "Dislike"
	default:
		return "Unknown"
	}
}

// ConvertInt32Opinion converts the given int32 to an Opinion.
func ConvertInt32Opinion(x int32) Opinion {
	switch x {
	case int32(Like):
		return Like
	case int32(Dislike):
		return Dislike
	default:
		return Unknown
	}
}

// ConvertInts32ToOpinions converts the given slice of int32 to a slice of Opinion.
func ConvertInts32ToOpinions(opinions []int32) []Opinion {
	converted := make([]Opinion, len(opinions))
	for i, raw := range opinions {
		converted[i] = ConvertInt32Opinion(raw)
	}
	return converted
}

// ConvertOpinionToInt32 converts the given Opinion to an int32.
func ConvertOpinionToInt32(x Opinion) int32 {
	switch x {
	case Like:
		return 1
	case Dislike:
		return 2
	default:
		return 4
	}
}
// ConvertOpinionsToInts32 converts the given slice of Opinion to a slice of int32.
func ConvertOpinionsToInts32(opinions []Opinion) []int32 {
	result := make([]int32, len(opinions))
	for i, opinion := range opinions {
		result[i] = ConvertOpinionToInt32(opinion)
	}
	return result
}
package main
import (
"image"
"image/color"
)
// ycc is an in memory YCbCr image. The Y, Cb and Cr samples are held in a
// single slice to increase resizing performance.
type ycc struct {
// Pix holds the image's pixels, in Y, Cb, Cr order. The pixel at
// (x, y) starts at Pix[(y-Rect.Min.Y)*Stride + (x-Rect.Min.X)*3].
Pix []uint8
// Stride is the Pix stride (in bytes) between vertically adjacent pixels.
Stride int
// Rect is the image's bounds.
Rect image.Rectangle
// SubsampleRatio is the subsample ratio of the original YCbCr image.
SubsampleRatio image.YCbCrSubsampleRatio
}
// PixOffset returns the index of the first element of Pix that corresponds to
// the pixel at (x, y).
func (p *ycc) PixOffset(x, y int) int {
return (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*3
}
func (p *ycc) Bounds() image.Rectangle {
return p.Rect
}
func (p *ycc) ColorModel() color.Model {
return color.YCbCrModel
}
func (p *ycc) At(x, y int) color.Color {
if !(image.Point{x, y}.In(p.Rect)) {
return color.YCbCr{}
}
i := p.PixOffset(x, y)
return color.YCbCr{
p.Pix[i+0],
p.Pix[i+1],
p.Pix[i+2],
}
}
func (p *ycc) Opaque() bool {
return true
}
// SubImage returns an image representing the portion of the image p visible
// through r. The returned value shares pixels with the original image.
func (p *ycc) SubImage(r image.Rectangle) image.Image {
r = r.Intersect(p.Rect)
if r.Empty() {
return &ycc{SubsampleRatio: p.SubsampleRatio}
}
i := p.PixOffset(r.Min.X, r.Min.Y)
return &ycc{
Pix: p.Pix[i:],
Stride: p.Stride,
Rect: r,
SubsampleRatio: p.SubsampleRatio,
}
}
// newYCC returns a new ycc with the given bounds and subsample ratio.
func newYCC(r image.Rectangle, s image.YCbCrSubsampleRatio) *ycc {
w, h := r.Dx(), r.Dy()
buf := make([]uint8, 3*w*h)
return &ycc{Pix: buf, Stride: 3 * w, Rect: r, SubsampleRatio: s}
}
// Copy of image.YCbCrSubsampleRatio constants - this allows us to support
// older versions of Go where these constants are not defined (i.e. Go 1.4)
//
// NOTE(review): the iota order must mirror image.YCbCrSubsampleRatio exactly,
// since these values are compared directly against image package values.
const (
	ycbcrSubsampleRatio444 image.YCbCrSubsampleRatio = iota
	ycbcrSubsampleRatio422
	ycbcrSubsampleRatio420
	ycbcrSubsampleRatio440
	ycbcrSubsampleRatio411
	ycbcrSubsampleRatio410
)
// YCbCr converts ycc to a YCbCr image with the same subsample ratio
// as the YCbCr image that ycc was generated from.
func (p *ycc) YCbCr() *image.YCbCr {
	ycbcr := image.NewYCbCr(p.Rect, p.SubsampleRatio)
	// Dispatch on the ratio so chroma samples land at the correct plane
	// indices for each subsampling scheme.
	switch ycbcr.SubsampleRatio {
	case ycbcrSubsampleRatio422:
		return p.ycbcr422(ycbcr)
	case ycbcrSubsampleRatio420:
		return p.ycbcr420(ycbcr)
	case ycbcrSubsampleRatio440:
		return p.ycbcr440(ycbcr)
	case ycbcrSubsampleRatio444:
		return p.ycbcr444(ycbcr)
	case ycbcrSubsampleRatio411:
		return p.ycbcr411(ycbcr)
	case ycbcrSubsampleRatio410:
		return p.ycbcr410(ycbcr)
	}
	// Unknown ratio: return the freshly allocated (zeroed) image unmodified.
	return ycbcr
}
// imageYCbCrToYCC converts a YCbCr image to a ycc image for resizing.
// The planar Y/Cb/Cr data is interleaved into a single buffer; note that the
// result's bounds are normalized to the origin regardless of in.Rect.Min.
func imageYCbCrToYCC(in *image.YCbCr) *ycc {
	w, h := in.Rect.Dx(), in.Rect.Dy()
	p := ycc{
		Pix:            make([]uint8, 3*w*h),
		Stride:         3 * w,
		Rect:           image.Rect(0, 0, w, h),
		SubsampleRatio: in.SubsampleRatio,
	}
	switch in.SubsampleRatio {
	case ycbcrSubsampleRatio422:
		return convertToYCC422(in, &p)
	case ycbcrSubsampleRatio420:
		return convertToYCC420(in, &p)
	case ycbcrSubsampleRatio440:
		return convertToYCC440(in, &p)
	case ycbcrSubsampleRatio444:
		return convertToYCC444(in, &p)
	case ycbcrSubsampleRatio411:
		return convertToYCC411(in, &p)
	case ycbcrSubsampleRatio410:
		return convertToYCC410(in, &p)
	}
	// Unknown ratio: return the interleaved buffer with no samples copied.
	return &p
}
// The ycbcrNNN methods below copy the interleaved Pix buffer back into the
// planar Y/Cb/Cr layout of an image.YCbCr. Each pixel advances off by 3; the
// chroma index ci maps multiple pixels onto the same chroma sample according
// to the subsample ratio, so later pixels in a chroma block overwrite earlier
// ones (last write wins).

// ycbcr422: one chroma sample per two horizontally adjacent pixels.
func (p *ycc) ycbcr422(ycbcr *image.YCbCr) *image.YCbCr {
	var off int
	Pix := p.Pix
	Y := ycbcr.Y
	Cb := ycbcr.Cb
	Cr := ycbcr.Cr
	for y := 0; y < ycbcr.Rect.Max.Y-ycbcr.Rect.Min.Y; y++ {
		yy := y * ycbcr.YStride
		cy := y * ycbcr.CStride
		for x := 0; x < ycbcr.Rect.Max.X-ycbcr.Rect.Min.X; x++ {
			ci := cy + x/2
			Y[yy+x] = Pix[off+0]
			Cb[ci] = Pix[off+1]
			Cr[ci] = Pix[off+2]
			off += 3
		}
	}
	return ycbcr
}

// ycbcr420: one chroma sample per 2x2 block of pixels.
func (p *ycc) ycbcr420(ycbcr *image.YCbCr) *image.YCbCr {
	var off int
	Pix := p.Pix
	Y := ycbcr.Y
	Cb := ycbcr.Cb
	Cr := ycbcr.Cr
	for y := 0; y < ycbcr.Rect.Max.Y-ycbcr.Rect.Min.Y; y++ {
		yy := y * ycbcr.YStride
		cy := (y / 2) * ycbcr.CStride
		for x := 0; x < ycbcr.Rect.Max.X-ycbcr.Rect.Min.X; x++ {
			ci := cy + x/2
			Y[yy+x] = Pix[off+0]
			Cb[ci] = Pix[off+1]
			Cr[ci] = Pix[off+2]
			off += 3
		}
	}
	return ycbcr
}

// ycbcr440: one chroma sample per two vertically adjacent pixels.
func (p *ycc) ycbcr440(ycbcr *image.YCbCr) *image.YCbCr {
	var off int
	Pix := p.Pix
	Y := ycbcr.Y
	Cb := ycbcr.Cb
	Cr := ycbcr.Cr
	for y := 0; y < ycbcr.Rect.Max.Y-ycbcr.Rect.Min.Y; y++ {
		yy := y * ycbcr.YStride
		cy := (y / 2) * ycbcr.CStride
		for x := 0; x < ycbcr.Rect.Max.X-ycbcr.Rect.Min.X; x++ {
			ci := cy + x
			Y[yy+x] = Pix[off+0]
			Cb[ci] = Pix[off+1]
			Cr[ci] = Pix[off+2]
			off += 3
		}
	}
	return ycbcr
}

// ycbcr444: one chroma sample per pixel (no subsampling).
func (p *ycc) ycbcr444(ycbcr *image.YCbCr) *image.YCbCr {
	var off int
	Pix := p.Pix
	Y := ycbcr.Y
	Cb := ycbcr.Cb
	Cr := ycbcr.Cr
	for y := 0; y < ycbcr.Rect.Max.Y-ycbcr.Rect.Min.Y; y++ {
		yy := y * ycbcr.YStride
		cy := y * ycbcr.CStride
		for x := 0; x < ycbcr.Rect.Max.X-ycbcr.Rect.Min.X; x++ {
			ci := cy + x
			Y[yy+x] = Pix[off+0]
			Cb[ci] = Pix[off+1]
			Cr[ci] = Pix[off+2]
			off += 3
		}
	}
	return ycbcr
}

// ycbcr411: one chroma sample per four horizontally adjacent pixels.
func (p *ycc) ycbcr411(ycbcr *image.YCbCr) *image.YCbCr {
	var off int
	Pix := p.Pix
	Y := ycbcr.Y
	Cb := ycbcr.Cb
	Cr := ycbcr.Cr
	for y := 0; y < ycbcr.Rect.Max.Y-ycbcr.Rect.Min.Y; y++ {
		yy := y * ycbcr.YStride
		cy := y * ycbcr.CStride
		for x := 0; x < ycbcr.Rect.Max.X-ycbcr.Rect.Min.X; x++ {
			ci := cy + x/4
			Y[yy+x] = Pix[off+0]
			Cb[ci] = Pix[off+1]
			Cr[ci] = Pix[off+2]
			off += 3
		}
	}
	return ycbcr
}

// ycbcr410: one chroma sample per 4x2 block of pixels.
func (p *ycc) ycbcr410(ycbcr *image.YCbCr) *image.YCbCr {
	var off int
	Pix := p.Pix
	Y := ycbcr.Y
	Cb := ycbcr.Cb
	Cr := ycbcr.Cr
	for y := 0; y < ycbcr.Rect.Max.Y-ycbcr.Rect.Min.Y; y++ {
		yy := y * ycbcr.YStride
		cy := (y / 2) * ycbcr.CStride
		for x := 0; x < ycbcr.Rect.Max.X-ycbcr.Rect.Min.X; x++ {
			ci := cy + x/4
			Y[yy+x] = Pix[off+0]
			Cb[ci] = Pix[off+1]
			Cr[ci] = Pix[off+2]
			off += 3
		}
	}
	return ycbcr
}
// The convertToYCCNNN functions below interleave a planar image.YCbCr into
// p's single Pix buffer, duplicating each chroma sample across every pixel of
// its chroma block according to the subsample ratio.

// convertToYCC422: chroma shared by two horizontally adjacent pixels.
func convertToYCC422(in *image.YCbCr, p *ycc) *ycc {
	var off int
	Pix := p.Pix
	Y := in.Y
	Cb := in.Cb
	Cr := in.Cr
	for y := 0; y < in.Rect.Max.Y-in.Rect.Min.Y; y++ {
		yy := y * in.YStride
		cy := y * in.CStride
		for x := 0; x < in.Rect.Max.X-in.Rect.Min.X; x++ {
			ci := cy + x/2
			Pix[off+0] = Y[yy+x]
			Pix[off+1] = Cb[ci]
			Pix[off+2] = Cr[ci]
			off += 3
		}
	}
	return p
}

// convertToYCC420: chroma shared by a 2x2 block of pixels.
func convertToYCC420(in *image.YCbCr, p *ycc) *ycc {
	var off int
	Pix := p.Pix
	Y := in.Y
	Cb := in.Cb
	Cr := in.Cr
	for y := 0; y < in.Rect.Max.Y-in.Rect.Min.Y; y++ {
		yy := y * in.YStride
		cy := (y / 2) * in.CStride
		for x := 0; x < in.Rect.Max.X-in.Rect.Min.X; x++ {
			ci := cy + x/2
			Pix[off+0] = Y[yy+x]
			Pix[off+1] = Cb[ci]
			Pix[off+2] = Cr[ci]
			off += 3
		}
	}
	return p
}

// convertToYCC440: chroma shared by two vertically adjacent pixels.
func convertToYCC440(in *image.YCbCr, p *ycc) *ycc {
	var off int
	Pix := p.Pix
	Y := in.Y
	Cb := in.Cb
	Cr := in.Cr
	for y := 0; y < in.Rect.Max.Y-in.Rect.Min.Y; y++ {
		yy := y * in.YStride
		cy := (y / 2) * in.CStride
		for x := 0; x < in.Rect.Max.X-in.Rect.Min.X; x++ {
			ci := cy + x
			Pix[off+0] = Y[yy+x]
			Pix[off+1] = Cb[ci]
			Pix[off+2] = Cr[ci]
			off += 3
		}
	}
	return p
}

// convertToYCC444: one chroma sample per pixel (no subsampling).
func convertToYCC444(in *image.YCbCr, p *ycc) *ycc {
	var off int
	Pix := p.Pix
	Y := in.Y
	Cb := in.Cb
	Cr := in.Cr
	for y := 0; y < in.Rect.Max.Y-in.Rect.Min.Y; y++ {
		yy := y * in.YStride
		cy := y * in.CStride
		for x := 0; x < in.Rect.Max.X-in.Rect.Min.X; x++ {
			ci := cy + x
			Pix[off+0] = Y[yy+x]
			Pix[off+1] = Cb[ci]
			Pix[off+2] = Cr[ci]
			off += 3
		}
	}
	return p
}

// convertToYCC411: chroma shared by four horizontally adjacent pixels.
func convertToYCC411(in *image.YCbCr, p *ycc) *ycc {
	var off int
	Pix := p.Pix
	Y := in.Y
	Cb := in.Cb
	Cr := in.Cr
	for y := 0; y < in.Rect.Max.Y-in.Rect.Min.Y; y++ {
		yy := y * in.YStride
		cy := y * in.CStride
		for x := 0; x < in.Rect.Max.X-in.Rect.Min.X; x++ {
			ci := cy + x/4
			Pix[off+0] = Y[yy+x]
			Pix[off+1] = Cb[ci]
			Pix[off+2] = Cr[ci]
			off += 3
		}
	}
	return p
}

// convertToYCC410: chroma shared by a 4x2 block of pixels.
func convertToYCC410(in *image.YCbCr, p *ycc) *ycc {
	var off int
	Pix := p.Pix
	Y := in.Y
	Cb := in.Cb
	Cr := in.Cr
	for y := 0; y < in.Rect.Max.Y-in.Rect.Min.Y; y++ {
		yy := y * in.YStride
		cy := (y / 2) * in.CStride
		for x := 0; x < in.Rect.Max.X-in.Rect.Min.X; x++ {
			ci := cy + x/4
			Pix[off+0] = Y[yy+x]
			Pix[off+1] = Cb[ci]
			Pix[off+2] = Cr[ci]
			off += 3
		}
	}
	return p
}
package bulletproofs
import (
"fmt"
"github.com/incognitochain/go-incognito-sdk-v2/crypto"
)
// InnerProductWitness represents a witness for an inner-product proof, described in the Bulletproofs paper.
// a and b are the secret vectors (of equal length) and p the point the prover
// commits to. NOTE(review): p is presumably the commitment to a, b under the
// generators later passed to Prove — confirm against callers.
type InnerProductWitness struct {
	a []*crypto.Scalar
	b []*crypto.Scalar
	p *crypto.Point
}
// InnerProductProof represents an inner-product proof. It is used as a sub-proof for a RangeProof.
// l and r hold one point per halving round of the argument; a and b are the
// final collapsed scalars and p the point the proof is verified against.
type InnerProductProof struct {
	l []*crypto.Point
	r []*crypto.Point
	a *crypto.Scalar
	b *crypto.Scalar
	p *crypto.Point
}

// Init creates an empty InnerProductProof.
func (proof *InnerProductProof) Init() *InnerProductProof {
	proof.l = []*crypto.Point{}
	proof.r = []*crypto.Point{}
	proof.a = new(crypto.Scalar)
	proof.b = new(crypto.Scalar)
	proof.p = new(crypto.Point).Identity()
	return proof
}
// Bytes returns the byte-representation of an InnerProductProof.
// Layout: a 1-byte count of the l/r points, then every l point, every r
// point, scalar a, scalar b, and finally point p.
func (proof InnerProductProof) Bytes() []byte {
	var res []byte

	// The number of rounds is small (it halves the witness each round in
	// Prove), so it fits in a single byte.
	res = append(res, byte(len(proof.l)))
	for _, l := range proof.l {
		res = append(res, l.ToBytesS()...)
	}
	for _, r := range proof.r {
		res = append(res, r.ToBytesS()...)
	}
	res = append(res, proof.a.ToBytesS()...)
	res = append(res, proof.b.ToBytesS()...)
	res = append(res, proof.p.ToBytesS()...)

	return res
}
// SetBytes sets byte-representation data to an InnerProductProof.
// The expected layout matches Bytes: a 1-byte l/r count, the l points, the r
// points, scalars a and b, then point p, each Ed25519KeySize bytes long.
// An empty input leaves the proof untouched and returns nil.
func (proof *InnerProductProof) SetBytes(bytes []byte) error {
	if len(bytes) == 0 {
		return nil
	}

	lenLArray := int(bytes[0])
	offset := 1
	var err error

	proof.l = make([]*crypto.Point, lenLArray)
	for i := 0; i < lenLArray; i++ {
		// Bounds-check before every read so truncated input fails cleanly.
		if offset+crypto.Ed25519KeySize > len(bytes) {
			return fmt.Errorf("unmarshalling failed")
		}
		proof.l[i], err = new(crypto.Point).FromBytesS(bytes[offset : offset+crypto.Ed25519KeySize])
		if err != nil {
			return err
		}
		offset += crypto.Ed25519KeySize
	}

	proof.r = make([]*crypto.Point, lenLArray)
	for i := 0; i < lenLArray; i++ {
		if offset+crypto.Ed25519KeySize > len(bytes) {
			return fmt.Errorf("unmarshalling failed")
		}
		proof.r[i], err = new(crypto.Point).FromBytesS(bytes[offset : offset+crypto.Ed25519KeySize])
		if err != nil {
			return err
		}
		offset += crypto.Ed25519KeySize
	}

	if offset+crypto.Ed25519KeySize > len(bytes) {
		return fmt.Errorf("unmarshalling failed")
	}
	proof.a = new(crypto.Scalar).FromBytesS(bytes[offset : offset+crypto.Ed25519KeySize])
	offset += crypto.Ed25519KeySize

	if offset+crypto.Ed25519KeySize > len(bytes) {
		return fmt.Errorf("unmarshalling failed")
	}
	proof.b = new(crypto.Scalar).FromBytesS(bytes[offset : offset+crypto.Ed25519KeySize])
	offset += crypto.Ed25519KeySize

	if offset+crypto.Ed25519KeySize > len(bytes) {
		return fmt.Errorf("unmarshalling failed")
	}
	proof.p, err = new(crypto.Point).FromBytesS(bytes[offset : offset+crypto.Ed25519KeySize])
	if err != nil {
		return err
	}
	return nil
}
// Prove returns an InnerProductProof for an InnerProductWitness.
// It runs the recursive halving argument from the Bulletproofs paper
// iteratively: each round splits the working vectors a, b (and generators
// G, H) in half, commits to the cross terms as L and R, derives a Fiat-Shamir
// challenge x from them (seeded by hashCache), and folds the halves together,
// until a and b are single scalars.
func (wit InnerProductWitness) Prove(GParam []*crypto.Point, HParam []*crypto.Point, uParam *crypto.Point, hashCache []byte) (*InnerProductProof, error) {
	if len(wit.a) != len(wit.b) {
		return nil, fmt.Errorf("invalid inputs")
	}

	N := len(wit.a)

	// Work on copies so the witness itself is never mutated.
	a := make([]*crypto.Scalar, N)
	b := make([]*crypto.Scalar, N)
	for i := range wit.a {
		a[i] = new(crypto.Scalar).Set(wit.a[i])
		b[i] = new(crypto.Scalar).Set(wit.b[i])
	}

	p := new(crypto.Point).Set(wit.p)
	G := make([]*crypto.Point, N)
	H := make([]*crypto.Point, N)
	for i := range G {
		G[i] = new(crypto.Point).Set(GParam[i])
		H[i] = new(crypto.Point).Set(HParam[i])
	}

	proof := new(InnerProductProof)
	proof.l = make([]*crypto.Point, 0)
	proof.r = make([]*crypto.Point, 0)
	proof.p = new(crypto.Point).Set(wit.p)

	for N > 1 {
		nPrime := N / 2

		// Cross inner products of the two halves.
		cL, err := innerProduct(a[:nPrime], b[nPrime:])
		if err != nil {
			return nil, err
		}
		cR, err := innerProduct(a[nPrime:], b[:nPrime])
		if err != nil {
			return nil, err
		}

		// L commits to (a_lo, b_hi, cL); R commits to (a_hi, b_lo, cR).
		L, err := encodeVectors(a[:nPrime], b[nPrime:], G[nPrime:], H[:nPrime])
		if err != nil {
			return nil, err
		}
		L.Add(L, new(crypto.Point).ScalarMult(uParam, cL))
		proof.l = append(proof.l, L)

		R, err := encodeVectors(a[nPrime:], b[:nPrime], G[:nPrime], H[nPrime:])
		if err != nil {
			return nil, err
		}
		R.Add(R, new(crypto.Point).ScalarMult(uParam, cR))
		proof.r = append(proof.r, R)

		// Fiat-Shamir challenge for this round, chained through hashCache.
		x := generateChallenge(hashCache, []*crypto.Point{L, R})
		hashCache = new(crypto.Scalar).Set(x).ToBytesS()

		xInverse := new(crypto.Scalar).Invert(x)
		xSquare := new(crypto.Scalar).Mul(x, x)
		xSquareInverse := new(crypto.Scalar).Mul(xInverse, xInverse)

		// calculate GPrime, HPrime, PPrime for the next loop
		GPrime := make([]*crypto.Point, nPrime)
		HPrime := make([]*crypto.Point, nPrime)
		for i := range GPrime {
			GPrime[i] = new(crypto.Point).AddPedersen(xInverse, G[i], x, G[i+nPrime])
			HPrime[i] = new(crypto.Point).AddPedersen(x, H[i], xInverse, H[i+nPrime])
		}

		// x^2 * l + P + xInverse^2 * r
		PPrime := new(crypto.Point).AddPedersen(xSquare, L, xSquareInverse, R)
		PPrime.Add(PPrime, p)

		// calculate aPrime, bPrime: fold the two halves with x / xInverse.
		aPrime := make([]*crypto.Scalar, nPrime)
		bPrime := make([]*crypto.Scalar, nPrime)
		for i := range aPrime {
			aPrime[i] = new(crypto.Scalar).Mul(a[i], x)
			aPrime[i] = new(crypto.Scalar).MulAdd(a[i+nPrime], xInverse, aPrime[i])
			bPrime[i] = new(crypto.Scalar).Mul(b[i], xInverse)
			bPrime[i] = new(crypto.Scalar).MulAdd(b[i+nPrime], x, bPrime[i])
		}

		a = aPrime
		b = bPrime
		p.Set(PPrime)
		G = GPrime
		H = HPrime
		N = nPrime
	}

	proof.a = new(crypto.Scalar).Set(a[0])
	proof.b = new(crypto.Scalar).Set(b[0])
	return proof, nil
}
package decisiontree
// A Node represents a splitting decision of the form "x[FeatureIndex] < Threshold ?" in a decision tree
type Node struct {
	// FeatureIndex indicates which feature is used in this splitting decision
	FeatureIndex int `json:"feature_index"`
	// Threshold indicates the cutoff value between the left and right subtrees
	Threshold float64 `json:"threshold"`
	// LeftChild is the index of the node representing the left subtree
	LeftChild int `json:"left_child"`
	// LeftIsLeaf indicates whether the left subtree is a leaf node
	LeftIsLeaf bool `json:"left_is_leaf"`
	// RightChild is the index of the node representing the right subtree
	RightChild int `json:"right_child"`
	// RightIsLeaf indicates whether the right subtree is a leaf node
	RightIsLeaf bool `json:"right_is_leaf"`
}

// A DecisionTree is a mapping from a feature space to real numbers implemented with a decision tree
type DecisionTree struct {
	// Nodes is a flat list of all nodes in the tree
	Nodes []Node `json:"nodes"`
	// Outputs is an array containing the outputs for each bin
	Outputs []float64 `json:"outputs"`
	// FeatureSize is the length of feature vectors processed by this tree
	FeatureSize int `json:"feature_size"`
	// Depth is the maximum depth of any leaf in the tree
	Depth int `json:"depth"`
}

// Bin drops a feature vector down a decision tree and returns the index of the
// bin (leaf) that it ends up in. It panics if the feature vector has the wrong
// length, the tree is uninitialized, or no leaf is reached within Depth steps.
func (t *DecisionTree) Bin(x []float64) int {
	if len(x) != t.FeatureSize {
		panic("feature vector had incorrect length")
	}
	if t.Nodes == nil {
		panic("tree not initialized")
	}
	node := t.Nodes[0]
	for depth := 0; depth < t.Depth; depth++ {
		if x[node.FeatureIndex] < node.Threshold {
			if node.LeftIsLeaf {
				return node.LeftChild
			}
			node = t.Nodes[node.LeftChild]
			continue
		}
		if node.RightIsLeaf {
			return node.RightChild
		}
		node = t.Nodes[node.RightChild]
	}
	panic("tree traversal did not terminate")
}

// Evaluate drops a feature vector down a decision tree and returns the output
// associated with the bin it ends up in.
func (t *DecisionTree) Evaluate(x []float64) float64 {
	return t.Outputs[t.Bin(x)]
}
// An Ensemble outputs the sum of several decision trees
type Ensemble struct {
	Trees []DecisionTree `json:"trees"`
}

// Evaluate computes the sum of the outputs of the component decision trees
// for the given feature vector.
func (e *Ensemble) Evaluate(x []float64) float64 {
	total := 0.0
	// Index to avoid copying each DecisionTree (it holds slices) per iteration.
	for i := range e.Trees {
		total += e.Trees[i].Evaluate(x)
	}
	return total
}
// Print satisfies the ranking.Scorer interface; it intentionally prints
// nothing for an ensemble.
func (e *Ensemble) Print() {
}
package plural
import (
"fmt"
"math"
"strconv"
"strings"
)
// Operands is a representation of CLDR Operands, see
// http://unicode.org/reports/tr35/tr35-numbers.html#Operands.
type Operands struct {
	N float64 // The absolute value of the source number (integer and decimals).
	I int64   // The integer digits of n.
	V int64   // The number of visible fraction digits in n, with trailing zeros.
	W int64   // The number of visible fraction digits in n, without trailing zeros.
	F int64   // The visible fractional digits in n, with trailing zeros.
	T int64   // The visible fractional digits in n, without trailing zeros.
	C int64   // The compact decimal exponent value: exponent of the power of 10 used in compact decimal formatting.
	E int64   // Currently, synonym for ‘c’. however, may be redefined in the future.
}

// NEqualsAny returns true if o represents an integer equal to any of the
// arguments.
func (o *Operands) NEqualsAny(any ...int64) bool {
	if o.T != 0 {
		// A non-zero fractional part means o is not an integer at all.
		return false
	}
	for _, candidate := range any {
		if o.I == candidate {
			return true
		}
	}
	return false
}

// NModEqualsAny returns true if o represents an integer equal to any of the
// arguments modulo mod.
func (o *Operands) NModEqualsAny(mod int64, any ...int64) bool {
	if o.T != 0 {
		return false
	}
	rem := o.I % mod
	for _, candidate := range any {
		if rem == candidate {
			return true
		}
	}
	return false
}

// NInRange returns true if o represents an integer in the closed interval
// [from, to].
func (o *Operands) NInRange(from, to int64) bool {
	if o.T != 0 {
		return false
	}
	return from <= o.I && o.I <= to
}

// NModInRange returns true if o represents an integer in the closed interval
// [from, to] modulo mod.
func (o *Operands) NModInRange(mod, from, to int64) bool {
	if o.T != 0 {
		return false
	}
	rem := o.I % mod
	return from <= rem && rem <= to
}
// NewOperands returns the operands for the given number.
func NewOperands(number interface{}) (*Operands, error) {
switch number := number.(type) {
case int:
return newOperandsInt64(int64(number)), nil
case int8:
return newOperandsInt64(int64(number)), nil
case int16:
return newOperandsInt64(int64(number)), nil
case int32:
return newOperandsInt64(int64(number)), nil
case int64:
return newOperandsInt64(number), nil
case string:
return newOperandsString(number)
case float32, float64:
return nil, fmt.Errorf("floats should be formatted into a string")
default:
return nil, fmt.Errorf("invalid type %T; expected integer or string", number)
}
}
func newOperandsInt64(i int64) *Operands {
if i < 0 {
i = -i
}
return &Operands{float64(i), i, 0, 0, 0, 0, 0, 0}
}
func newOperandsString(s string) (*Operands, error) {
if s[0] == '-' {
s = s[1:]
}
var err error
var n float64
var c int64
if parts := strings.Split(s, "c"); len(parts) == 2 {
n, err = strconv.ParseFloat(parts[0], 64)
if err != nil {
return nil, err
}
c, err = strconv.ParseInt(parts[1], 10, 64)
if err != nil {
return nil, err
}
n *= math.Pow10(int(c))
s = fmt.Sprintf("%f", n)
} else {
n, err = strconv.ParseFloat(s, 64)
if err != nil {
return nil, err
}
}
ops := &Operands{
N: n,
C: c,
E: c,
}
parts := strings.SplitN(s, ".", 2)
ops.I, err = strconv.ParseInt(parts[0], 10, 64)
if err != nil {
return nil, err
}
if len(parts) == 1 {
return ops, nil
}
fraction := parts[1]
ops.V = int64(len(fraction))
for i := ops.V - 1; i >= 0; i-- {
if fraction[i] != '0' {
ops.W = i + 1
break
}
}
if ops.V > 0 {
f, err := strconv.ParseInt(fraction, 10, 0)
if err != nil {
return nil, err
}
ops.F = f
}
if ops.W > 0 {
t, err := strconv.ParseInt(fraction[:ops.W], 10, 0)
if err != nil {
return nil, err
}
ops.T = t
}
return ops, nil
} | internal/plural/operands.go | 0.683736 | 0.401923 | operands.go | starcoder |
package data
import (
"math"
"github.com/calummccain/coxeter/vector"
)
// GoursatTetrahedron holds the fundamental-domain data of a honeycomb.
// V, E, F and C appear to be the vertex-, edge-, face- and cell-type points
// of the Goursat tetrahedron, and CFE/CFV/CEV/FEV the points opposite the
// named triples; the float fields look like pairwise quantities (inner
// products or distances) between the labelled points — TODO confirm against
// the vector package usage.
type GoursatTetrahedron struct {
	V vector.Vec4
	E vector.Vec4
	F vector.Vec4
	C vector.Vec4
	CFE vector.Vec4
	CFV vector.Vec4
	CEV vector.Vec4
	FEV vector.Vec4
	CF float64
	CE float64
	CV float64
	FE float64
	FV float64
	EV float64
}

// Coxeter bundles a Coxeter group description: P, Q, R (presumably the
// Schläfli symbol {p,q,r} — confirm), the four generating reflections A-D as
// maps on Vec4, the reflection words for the faces, and the group's Goursat
// tetrahedron.
type Coxeter struct {
	P float64
	Q float64
	R float64
	A func(vector.Vec4) vector.Vec4
	B func(vector.Vec4) vector.Vec4
	C func(vector.Vec4) vector.Vec4
	D func(vector.Vec4) vector.Vec4
	FaceReflections []string
	GoursatTetrahedron GoursatTetrahedron
}

// Honeycomb describes a honeycomb built from a Coxeter group: a reference
// cell (Vertices/Edges/Faces), the geometry of the ambient space (Space is
// 'e' for Euclidean per DistanceSquared; other values unconfirmed here),
// and the Scale/InnerProduct functions that define its metric. EVal and
// PVal are numeric parameters whose meaning is not visible in this file.
type Honeycomb struct {
	Coxeter Coxeter
	CellType string
	Vertices []vector.Vec4
	Edges [][2]int
	Faces [][]int
	EVal float64
	PVal float64
	Space byte
	Scale func(vector.Vec4) vector.Vec4
	InnerProduct func(vector.Vec4, vector.Vec4) float64
}

// Cell is one concrete cell of a honeycomb: its (already transformed)
// vertices plus the shared edge/face index topology and their counts.
type Cell struct {
	Vertices []vector.Vec4
	Edges [][2]int
	Faces [][]int
	NumVertices int
	NumEdges int
	NumFaces int
}
// DistanceSquared returns a squared-distance measure between a and b under
// the honeycomb's inner product. In Euclidean space ('e') it is
// <a-b, a-b>; otherwise it is <a,b>^2 / (<a,a>·<b,b>), where factors whose
// magnitude is at most DistanceSquaredEps (defined elsewhere in this
// package) are dropped from the denominator to avoid dividing by ~0.
func (honeycomb *Honeycomb) DistanceSquared(a, b vector.Vec4) float64 {
	if honeycomb.Space == 'e' {
		return honeycomb.InnerProduct(vector.Diff4(a, b), vector.Diff4(a, b))
	}
	// Normalise by <a,a> and <b,b> only when they are safely non-zero.
	den := 1.0
	if math.Abs(honeycomb.InnerProduct(a, a)) > DistanceSquaredEps {
		den *= honeycomb.InnerProduct(a, a)
	}
	if math.Abs(honeycomb.InnerProduct(b, b)) > DistanceSquaredEps {
		den *= honeycomb.InnerProduct(b, b)
	}
	return honeycomb.InnerProduct(a, b) * honeycomb.InnerProduct(a, b) / den
}
func (honeycomb *Honeycomb) GenerateCell(word string) Cell {
vertices := vector.TransformVertices(
honeycomb.Vertices,
word,
honeycomb.Coxeter.A,
honeycomb.Coxeter.B,
honeycomb.Coxeter.C,
honeycomb.Coxeter.D,
)
for i := 0; i < len(vertices); i++ {
vertices[i] = honeycomb.Scale(vertices[i])
}
return Cell{
Vertices: vertices,
Edges: honeycomb.Edges,
Faces: honeycomb.Faces,
NumVertices: len(vertices),
NumEdges: len(honeycomb.Edges),
NumFaces: len(honeycomb.Faces),
}
} | data/dataStruct.go | 0.540196 | 0.532243 | dataStruct.go | starcoder |
The downsampled data maintains the visual characteristics of the original line
using considerably fewer data points.
This is a translation of the javascript code at
https://github.com/sveinn-steinarsson/flot-downsample/
*/
package lttb
import (
"math"
)
// Point is a point on a line.
type Point struct {
	X float64
	Y float64
}

// LTTB down-samples the data to contain only threshold number of points that
// have the same visual shape as the original data
// (Largest-Triangle-Three-Buckets).
//
// Special cases: a threshold of 0, or one at least as large as len(data),
// returns data unchanged; thresholds of 1 and 2 return the endpoints clipped
// to threshold, since no triangle buckets can be formed.
func LTTB(data []Point, threshold int) []Point {
	if threshold >= len(data) || threshold == 0 {
		return data // Nothing to do
	}
	if threshold < 3 {
		// The bucket math below divides by threshold-2; with threshold 1 it
		// produced too many points, and with threshold 2 it converted +Inf
		// to int, which is implementation-specific. Return the endpoints.
		return []Point{data[0], data[len(data)-1]}[:threshold]
	}
	sampled := make([]Point, 0, threshold)

	// Bucket size. Leave room for start and end data points.
	every := float64(len(data)-2) / float64(threshold-2)

	sampled = append(sampled, data[0]) // Always add the first point

	bucketStart := 1
	bucketCenter := int(math.Floor(every)) + 1

	var a int

	for i := 0; i < threshold-2; i++ {
		bucketEnd := int(math.Floor(float64(i+2)*every)) + 1

		// Calculate point average for next bucket (used as the third
		// triangle vertex).
		avgRangeStart := bucketCenter
		avgRangeEnd := bucketEnd
		if avgRangeEnd >= len(data) {
			avgRangeEnd = len(data)
		}
		avgRangeLength := float64(avgRangeEnd - avgRangeStart)

		var avgX, avgY float64
		for ; avgRangeStart < avgRangeEnd; avgRangeStart++ {
			avgX += data[avgRangeStart].X
			avgY += data[avgRangeStart].Y
		}
		avgX /= avgRangeLength
		avgY /= avgRangeLength

		// Get the range for this bucket
		rangeOffs := bucketStart
		rangeTo := bucketCenter

		// Point a: the point chosen from the previous bucket.
		pointAX := data[a].X
		pointAY := data[a].Y

		maxArea := -1.0
		var nextA int

		for ; rangeOffs < rangeTo; rangeOffs++ {
			// Calculate triangle area over three buckets.
			area := (pointAX-avgX)*(data[rangeOffs].Y-pointAY) - (pointAX-data[rangeOffs].X)*(avgY-pointAY)
			// We only care about the relative area here.
			// Calling math.Abs() is slower than squaring.
			area *= area
			if area > maxArea {
				maxArea = area
				nextA = rangeOffs // Next a is this b
			}
		}

		sampled = append(sampled, data[nextA]) // Pick this point from the bucket
		a = nextA                              // This a is the next a (chosen b)

		bucketStart = bucketCenter
		bucketCenter = bucketEnd
	}

	sampled = append(sampled, data[len(data)-1]) // Always add last

	return sampled
}
package vm
import (
"unsafe"
)
// Expression or variable operation semantic
const (
	SemanticOpNone = iota // no read/write semantic resolved yet
	SemanticOpRead // the expression or variable is being read
	SemanticOpWrite // the expression or variable is being written
)

// Expression or variable lexical scoping
const (
	LexicalScopingUnknown = iota // scoping not yet resolved by semantic analysis
	LexicalScopingGlobal // Expression or variable in global table
	LexicalScopingUpvalue // Expression or variable in upvalue
	LexicalScopingLocal // Expression or variable in current function
)
// AST base class, all AST node derived from this class and
// provide Visitor to Accept itself.
type SyntaxTree interface {
Accept(v Visitor, data unsafe.Pointer)
}
// Chunk is the root node of a parsed module: its top-level block plus the
// name of the module it was parsed from.
type Chunk struct {
	Block SyntaxTree
	Module *String
}

// NewChunk builds a Chunk from a top-level block and its module name.
func NewChunk(block SyntaxTree, module *String) *Chunk {
	return &Chunk{Block: block, Module: module}
}

// Accept dispatches this node to the visitor.
func (c *Chunk) Accept(v Visitor, data unsafe.Pointer) {
	v.VisitChunk(c, data)
}
type Block struct {
Statements []SyntaxTree
ReturnStmt SyntaxTree
}
func NewBlock() *Block {
return &Block{}
}
func (b *Block) Accept(v Visitor, data unsafe.Pointer) {
v.VisitBlock(b, data)
}
type ReturnStatement struct {
ExpList SyntaxTree
Line int
ExpValueCount int
}
func NewReturnStatement(line int) *ReturnStatement {
return &ReturnStatement{Line: line}
}
func (r *ReturnStatement) Accept(v Visitor, data unsafe.Pointer) {
v.VisitReturnStatement(r, data)
}
type BreakStatement struct {
Break TokenDetail
Loop SyntaxTree // For semantic
}
func NewBreakStatement(b TokenDetail) *BreakStatement {
return &BreakStatement{Break: b}
}
func (b *BreakStatement) Accept(v Visitor, data unsafe.Pointer) {
v.VisitBreakStatement(b, data)
}
type DoStatement struct {
Block SyntaxTree
}
func NewDoStatement(block SyntaxTree) *DoStatement {
return &DoStatement{Block: block}
}
func (d *DoStatement) Accept(v Visitor, data unsafe.Pointer) {
v.VisitDoStatement(d, data)
}
type WhileStatement struct {
Exp SyntaxTree
Block SyntaxTree
FirstLine int
LastLine int
}
func NewWhileStatement(exp, block SyntaxTree, firstLine, lastLine int) *WhileStatement {
return &WhileStatement{exp, block, firstLine, lastLine}
}
func (w *WhileStatement) Accept(v Visitor, data unsafe.Pointer) {
v.VisitWhileStatement(w, data)
}
type RepeatStatement struct {
Block SyntaxTree
Exp SyntaxTree
Line int // Line of until
}
func NewRepeatStatement(block, exp SyntaxTree, line int) *RepeatStatement {
return &RepeatStatement{block, exp, line}
}
func (r *RepeatStatement) Accept(v Visitor, data unsafe.Pointer) {
v.VisitRepeatStatement(r, data)
}
type IfStatement struct {
Exp SyntaxTree
TrueBranch SyntaxTree
FalseBranch SyntaxTree
Line int // Line of if
BlockEndLine int // End line of block
}
func NewIfStatement(exp, trueBranch, falseBranch SyntaxTree, line, blockEndline int) *IfStatement {
return &IfStatement{exp, trueBranch, falseBranch, line, blockEndline}
}
func (i *IfStatement) Accept(v Visitor, data unsafe.Pointer) {
v.VisitIfStatement(i, data)
}
type ElseIfStatement struct {
Exp SyntaxTree
TrueBranch SyntaxTree
FalseBranch SyntaxTree
Line int // Line of elseif
BlockEndLine int // End line of block
}
func NewElseIfStatement(exp, trueBranch, falseBranch SyntaxTree, line, blockEndLind int) *ElseIfStatement {
return &ElseIfStatement{exp, trueBranch, falseBranch, line, blockEndLind}
}
func (e *ElseIfStatement) Accept(v Visitor, data unsafe.Pointer) {
v.VisitElseIfStatement(e, data)
}
type ElseStatement struct {
Block SyntaxTree
}
func NewElseStatement(block SyntaxTree) *ElseStatement {
return &ElseStatement{block}
}
func (e *ElseStatement) Accept(v Visitor, data unsafe.Pointer) {
v.VisitElseStatement(e, data)
}
type NumericForStatement struct {
Name TokenDetail
Exp1 SyntaxTree
Exp2 SyntaxTree
Exp3 SyntaxTree
Block SyntaxTree
}
func NewNumericForStatement(name TokenDetail, exp1, exp2, exp3, block SyntaxTree) *NumericForStatement {
return &NumericForStatement{name, exp1, exp2, exp3, block}
}
func (n *NumericForStatement) Accept(v Visitor, data unsafe.Pointer) {
v.VisitNumericForStatement(n, data)
}
type GenericForStatement struct {
NameList SyntaxTree
ExpList SyntaxTree
Block SyntaxTree
Line int
}
func NewGenericForStatement(nameList, expList, block SyntaxTree, line int) *GenericForStatement {
return &GenericForStatement{nameList, expList, block, line}
}
func (g *GenericForStatement) Accept(v Visitor, data unsafe.Pointer) {
v.VisitGenericForStatement(g, data)
}
type FunctionStatement struct {
FuncName SyntaxTree
FuncBody SyntaxTree
}
func NewFunctionStatement(funcName, funcBody SyntaxTree) *FunctionStatement {
return &FunctionStatement{funcName, funcBody}
}
func (f *FunctionStatement) Accept(v Visitor, data unsafe.Pointer) {
v.VisitFunctionStatement(f, data)
}
type FunctionName struct {
Names []TokenDetail
MemberName TokenDetail
Scoping int // First token scoping
}
func NewFunctionName() *FunctionName {
return &FunctionName{Scoping: LexicalScopingUnknown}
}
func (f *FunctionName) Accept(v Visitor, data unsafe.Pointer) {
v.VisitFunctionName(f, data)
}
type LocalFunctionStatement struct {
Name TokenDetail
FuncBody SyntaxTree
}
func NewLocalFunctionStatement(name TokenDetail, funcBody SyntaxTree) *LocalFunctionStatement {
return &LocalFunctionStatement{name, funcBody}
}
func (l *LocalFunctionStatement) Accept(v Visitor, data unsafe.Pointer) {
v.VisitLocalFunctionStatement(l, data)
}
type LocalNameListStatement struct {
NameList SyntaxTree
ExpList SyntaxTree
Line int // Start Line
NameCount int // For semantic and code generate
}
func NewLocalNameListStatement(nameList, ExpList SyntaxTree, line int) *LocalNameListStatement {
return &LocalNameListStatement{nameList, ExpList, line, 0}
}
func (l *LocalNameListStatement) Accept(v Visitor, data unsafe.Pointer) {
v.VisitLocalNameListStatement(l, data)
}
type AssignmentStatement struct {
VarList SyntaxTree
ExpList SyntaxTree
Line int // Start line
VarCount int // For semantic
}
func NewAssignmentStatement(varList, expList SyntaxTree, line int) *AssignmentStatement {
return &AssignmentStatement{varList, expList, line, 0}
}
func (a *AssignmentStatement) Accept(v Visitor, data unsafe.Pointer) {
v.VisitAssignmentStatement(a, data)
}
type VarList struct {
VarList []SyntaxTree
}
func NewVarList() *VarList {
return &VarList{}
}
func (vl *VarList) Accept(v Visitor, data unsafe.Pointer) {
v.VisitVarList(vl, data)
}
type Terminator struct {
Token TokenDetail
Semantic int
Scoping int
}
func NewTerminator(token TokenDetail) *Terminator {
return &Terminator{token, SemanticOpNone, LexicalScopingUnknown}
}
func (t *Terminator) Accept(v Visitor, data unsafe.Pointer) {
v.VisitTerminator(t, data)
}
type BinaryExpression struct {
Left SyntaxTree
Right SyntaxTree
OpToken TokenDetail
}
func NewBinaryExpression(left, right SyntaxTree, op TokenDetail) *BinaryExpression {
return &BinaryExpression{left, right, op}
}
func (b *BinaryExpression) Accept(v Visitor, data unsafe.Pointer) {
v.VisitBinaryExpression(b, data)
}
type UnaryExpression struct {
Exp SyntaxTree
OpToken TokenDetail
}
func NewUnaryExpression(exp SyntaxTree, op TokenDetail) *UnaryExpression {
return &UnaryExpression{exp, op}
}
func (u *UnaryExpression) Accept(v Visitor, data unsafe.Pointer) {
v.VisitUnaryExpression(u, data)
}
type FunctionBody struct {
ParamList SyntaxTree
BLock SyntaxTree
HasSelf bool // For code generate, has 'self' param or not
Line int
}
func NewFunctionBody(paramList, block SyntaxTree, line int) *FunctionBody {
return &FunctionBody{paramList, block, false, line}
}
func (f *FunctionBody) Accept(v Visitor, data unsafe.Pointer) {
v.VisitFunctionBody(f, data)
}
type ParamList struct {
NameList SyntaxTree
Vararg bool
FixArgCount int // For semantic and code generate
}
func NewParamList(nameList SyntaxTree, vararg bool) *ParamList {
return &ParamList{nameList, vararg, 0}
}
func (p *ParamList) Accept(v Visitor, data unsafe.Pointer) {
v.VisitParamList(p, data)
}
type NameList struct {
Names []TokenDetail
}
func NewNameList() *NameList {
return &NameList{}
}
func (n *NameList) Accept(v Visitor, data unsafe.Pointer) {
v.VisitNameList(n, data)
}
type TableDefine struct {
Fields []SyntaxTree
Line int
}
func NewTableDefine(line int) *TableDefine {
return &TableDefine{Line: line}
}
func (t *TableDefine) Accept(v Visitor, data unsafe.Pointer) {
v.VisitTableDefine(t, data)
}
type TableIndexField struct {
Index SyntaxTree
Value SyntaxTree
Line int
}
func NewTableIndexField(index, value SyntaxTree, line int) *TableIndexField {
return &TableIndexField{index, value, line}
}
func (t *TableIndexField) Accept(v Visitor, data unsafe.Pointer) {
v.VisitTableIndexField(t, data)
}
type TableNameField struct {
Name TokenDetail
Value SyntaxTree
}
func NewTableNameField(name TokenDetail, value SyntaxTree) *TableNameField {
return &TableNameField{name, value}
}
func (t *TableNameField) Accept(v Visitor, data unsafe.Pointer) {
v.VisitTableNameField(t, data)
}
type TableArrayField struct {
Value SyntaxTree
Line int
}
func NewTableArrayField(value SyntaxTree, line int) *TableArrayField {
return &TableArrayField{value, line}
}
func (t *TableArrayField) Accept(v Visitor, data unsafe.Pointer) {
v.VisitTableArrayField(t, data)
}
type IndexAccessor struct {
Table SyntaxTree
Index SyntaxTree
Line int
Semantic int // For semantic
}
func NewIndexAccessor(table, index SyntaxTree, line int) *IndexAccessor {
return &IndexAccessor{table, index, line, SemanticOpNone}
}
func (i *IndexAccessor) Accept(v Visitor, data unsafe.Pointer) {
v.VisitIndexAccessor(i, data)
}
type MemberAccessor struct {
Table SyntaxTree
Member TokenDetail
Semantic int // For semantic
}
func NewMemberAccessor(table SyntaxTree, member TokenDetail) *MemberAccessor {
return &MemberAccessor{table, member, SemanticOpNone}
}
func (m *MemberAccessor) Accept(v Visitor, data unsafe.Pointer) {
v.VisitMemberAccessor(m, data)
}
type NormalFuncCall struct {
Caller SyntaxTree
Args SyntaxTree
Line int // Function call line in source
}
func NewNormalFuncCall(caller, args SyntaxTree, line int) *NormalFuncCall {
return &NormalFuncCall{caller, args, line}
}
func (n *NormalFuncCall) Accept(v Visitor, data unsafe.Pointer) {
v.VisitNormalFuncCall(n, data)
}
type MemberFuncCall struct {
Caller SyntaxTree
Member TokenDetail
Args SyntaxTree
Line int // Function call line in source
}
func NewMemberFuncCall(caller SyntaxTree, member TokenDetail, args SyntaxTree, line int) *MemberFuncCall {
return &MemberFuncCall{caller, member, args, line}
}
func (m *MemberFuncCall) Accept(v Visitor, data unsafe.Pointer) {
v.VisitMemberFuncCall(m, data)
}
type FuncCallArgs struct {
Arg SyntaxTree
Type int
ArgValueCount int // For code generate
}
const (
ArgTypeExpList = iota
ArgTypeTable
ArgTypeString
)
func NewFuncCallArgs(arg SyntaxTree, argType int) *FuncCallArgs {
return &FuncCallArgs{arg, argType, 0}
}
func (f *FuncCallArgs) Accept(v Visitor, data unsafe.Pointer) {
v.VisitFuncCallArgs(f, data)
}
type ExpressionList struct {
ExpList []SyntaxTree
Line int // Start line
}
func NewExpressionList(startLine int) *ExpressionList {
return &ExpressionList{Line: startLine}
}
func (e *ExpressionList) Accept(v Visitor, data unsafe.Pointer) {
v.VisitExpressionList(e, data)
}
package iso20022
// Specifies security rate details.
// (ISO 20022 message component CorporateActionRate28; each field's struct
// tag carries its ISO XML element name, and optional elements are marked
// omitempty.)
type CorporateActionRate28 struct {

	// Quantity of additional intermediate securities/new equities awarded for a given quantity of securities derived from subscription.
	AdditionalQuantityForSubscribedResultantSecurities *RatioFormat11Choice `xml:"AddtlQtyForSbcbdRsltntScties,omitempty"`

	// Quantity of additional securities for a given quantity of underlying securities where underlying securities are not exchanged or debited, for example, 1 for 1: 1 new equity credited for every 1 underlying equity = 2 resulting equities.
	AdditionalQuantityForExistingSecurities *RatioFormat11Choice `xml:"AddtlQtyForExstgScties,omitempty"`

	// Quantity of new securities for a given quantity of underlying securities, where the underlying securities will be exchanged or debited, for example, 2 for 1: 2 new equities credited for every 1 underlying equity debited = 2 resulting equities.
	NewToOld *RatioFormat12Choice `xml:"NewToOd,omitempty"`

	// Rate used to determine the cash consideration split across outturn settlement transactions that are the result of a transformation of the parent transaction.
	TransformationRate *PercentageRate `xml:"TrfrmatnRate,omitempty"`

	// Rate used to calculate the amount of the charges/fees that cannot be categorised.
	ChargesFees *RateAndAmountFormat14Choice `xml:"ChrgsFees,omitempty"`

	// Percentage of fiscal tax to apply.
	FiscalStamp *RateFormat6Choice `xml:"FsclStmp,omitempty"`

	// Rate applicable to the event announced, for example, redemption rate for a redemption event.
	ApplicableRate *RateFormat6Choice `xml:"AplblRate,omitempty"`

	// Amount of money per equity allocated as the result of a tax credit.
	TaxCreditRate []*TaxCreditRateFormat5Choice `xml:"TaxCdtRate,omitempty"`
}
// AddAdditionalQuantityForSubscribedResultantSecurities allocates the field and returns it for population.
func (c *CorporateActionRate28) AddAdditionalQuantityForSubscribedResultantSecurities() *RatioFormat11Choice {
	c.AdditionalQuantityForSubscribedResultantSecurities = new(RatioFormat11Choice)
	return c.AdditionalQuantityForSubscribedResultantSecurities
}

// AddAdditionalQuantityForExistingSecurities allocates the field and returns it for population.
func (c *CorporateActionRate28) AddAdditionalQuantityForExistingSecurities() *RatioFormat11Choice {
	c.AdditionalQuantityForExistingSecurities = new(RatioFormat11Choice)
	return c.AdditionalQuantityForExistingSecurities
}

// AddNewToOld allocates the NewToOld ratio and returns it for population.
func (c *CorporateActionRate28) AddNewToOld() *RatioFormat12Choice {
	c.NewToOld = new(RatioFormat12Choice)
	return c.NewToOld
}

// SetTransformationRate stores value as the transformation percentage rate.
func (c *CorporateActionRate28) SetTransformationRate(value string) {
	c.TransformationRate = (*PercentageRate)(&value)
}

// AddChargesFees allocates the charges/fees rate and returns it for population.
func (c *CorporateActionRate28) AddChargesFees() *RateAndAmountFormat14Choice {
	c.ChargesFees = new(RateAndAmountFormat14Choice)
	return c.ChargesFees
}

// AddFiscalStamp allocates the fiscal stamp rate and returns it for population.
func (c *CorporateActionRate28) AddFiscalStamp() *RateFormat6Choice {
	c.FiscalStamp = new(RateFormat6Choice)
	return c.FiscalStamp
}

// AddApplicableRate allocates the applicable rate and returns it for population.
func (c *CorporateActionRate28) AddApplicableRate() *RateFormat6Choice {
	c.ApplicableRate = new(RateFormat6Choice)
	return c.ApplicableRate
}
func (c *CorporateActionRate28) AddTaxCreditRate() *TaxCreditRateFormat5Choice {
newValue := new(TaxCreditRateFormat5Choice)
c.TaxCreditRate = append(c.TaxCreditRate, newValue)
return newValue
} | CorporateActionRate28.go | 0.873835 | 0.460774 | CorporateActionRate28.go | starcoder |
package idgen
import "bytes"
// TrieKey is a fixed-length bit vector backed by a Go byte slice. Bit i
// lives in byte i/8 and is read from the i-th least-significant position of
// that byte.
type TrieKey []byte

// BitAt returns the bit (0 or 1) at the given offset.
func (bs TrieKey) BitAt(offset int) byte {
	return (bs[offset/8] >> (offset % 8)) & 1
}

// BitLen returns the number of bits in the key.
func (bs TrieKey) BitLen() int {
	return len(bs) * 8
}

// TrieKeyEqual reports whether two keys hold identical bytes.
func TrieKeyEqual(x, y TrieKey) bool {
	return bytes.Equal(x, y)
}

// XorTrie is a binary trie over equal-length keys. Keys are stored only in
// leaves; an interior node always has both children populated.
type XorTrie struct {
	branch [2]*XorTrie
	key    TrieKey
}

// NewXorTrie returns an empty trie.
func NewXorTrie() *XorTrie {
	return &XorTrie{}
}

// Depth returns the depth of the deepest leaf (0 for an empty or
// single-key trie).
func (trie *XorTrie) Depth() int {
	return trie.depth(0)
}

func (trie *XorTrie) depth(depth int) int {
	if trie.branch[0] == nil && trie.branch[1] == nil {
		return depth
	}
	left := trie.branch[0].depth(depth + 1)
	right := trie.branch[1].depth(depth + 1)
	return max(left, right)
}

func max(x, y int) int {
	if x < y {
		return y
	}
	return x
}

// Insert adds q to the trie, reporting the depth at which it was placed and
// whether it was newly added (false when q was already present).
func (trie *XorTrie) Insert(q TrieKey) (insertedDepth int, insertedOK bool) {
	return trie.insert(0, q)
}

func (trie *XorTrie) insert(depth int, q TrieKey) (int, bool) {
	if next := trie.branch[q.BitAt(depth)]; next != nil {
		return next.insert(depth+1, q)
	}
	switch {
	case trie.key == nil:
		// Empty leaf: claim it.
		trie.key = q
		return depth, true
	case TrieKeyEqual(trie.key, q):
		// Key already stored here.
		return depth, false
	default:
		// Occupied leaf: split into an interior node holding both keys.
		p := trie.key
		trie.key = nil
		trie.branch[0], trie.branch[1] = &XorTrie{}, &XorTrie{}
		trie.branch[p.BitAt(depth)].insert(depth+1, p)
		return trie.branch[q.BitAt(depth)].insert(depth+1, q)
	}
}

// Remove deletes q from the trie, reporting the depth reached and whether a
// key was actually removed.
func (trie *XorTrie) Remove(q TrieKey) (removedDepth int, removed bool) {
	return trie.remove(0, q)
}

func (trie *XorTrie) remove(depth int, q TrieKey) (int, bool) {
	next := trie.branch[q.BitAt(depth)]
	if next == nil {
		if trie.key != nil && TrieKeyEqual(q, trie.key) {
			trie.key = nil
			return depth, true
		}
		return depth, false
	}
	d, ok := next.remove(depth+1, q)
	if ok {
		// Collapse redundant interior structure left behind by the removal.
		trie.shrink()
	}
	return d, ok
}

func (trie *XorTrie) isEmptyLeaf() bool {
	return trie.key == nil && trie.branch[0] == nil && trie.branch[1] == nil
}

func (trie *XorTrie) isNonEmptyLeaf() bool {
	return trie.key != nil && trie.branch[0] == nil && trie.branch[1] == nil
}

// shrink folds this node's children back into the node when each child is a
// leaf and at most one of them still carries a key.
func (trie *XorTrie) shrink() {
	b0, b1 := trie.branch[0], trie.branch[1]
	switch {
	case b0.isEmptyLeaf() && b1.isEmptyLeaf():
		trie.branch[0], trie.branch[1] = nil, nil
	case b0.isEmptyLeaf() && b1.isNonEmptyLeaf():
		trie.key = b1.key
		trie.branch[0], trie.branch[1] = nil, nil
	case b0.isNonEmptyLeaf() && b1.isEmptyLeaf():
		trie.key = b0.key
		trie.branch[0], trie.branch[1] = nil, nil
	}
}
package car
import (
"encoding/binary"
"io"
)
const (
// PragmaSize is the size of the CARv2 pragma in bytes.
PragmaSize = 11
// HeaderSize is the fixed size of CARv2 header in number of bytes.
HeaderSize = 40
// CharacteristicsSize is the fixed size of Characteristics bitfield within CARv2 header in number of bytes.
CharacteristicsSize = 16
)
// The pragma of a CARv2, containing the version number.
// This is a valid CARv1 header, with version number of 2 and no root CIDs.
var Pragma = []byte{
0x0a, // unit(10)
0xa1, // map(1)
0x67, // string(7)
0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, // "version"
0x02, // uint(2)
}
type (
// Header represents the CARv2 header/pragma.
Header struct {
// 128-bit characteristics of this CARv2 file, such as order, deduplication, etc. Reserved for future use.
Characteristics Characteristics
// The byte-offset from the beginning of the CARv2 to the first byte of the CARv1 data payload.
DataOffset uint64
// The byte-length of the CARv1 data payload.
DataSize uint64
// The byte-offset from the beginning of the CARv2 to the first byte of the index payload. This value may be 0 to indicate the absence of index data.
IndexOffset uint64
}
// Characteristics is a bitfield placeholder for capturing the characteristics of a CARv2 such as order and determinism.
Characteristics struct {
Hi uint64
Lo uint64
}
)
// fullyIndexedCharPos is the position of the Characteristics.Hi bit that specifies whether the index is a catalog of all CIDs or not.
const fullyIndexedCharPos = 7 // bit 7 of Hi: the most-significant bit of the first serialized (little-endian) byte

// WriteTo writes this characteristics to the given w.
// It always emits exactly CharacteristicsSize (16) bytes: Hi then Lo, each little-endian.
func (c Characteristics) WriteTo(w io.Writer) (n int64, err error) {
	buf := make([]byte, 16)
	binary.LittleEndian.PutUint64(buf[:8], c.Hi)
	binary.LittleEndian.PutUint64(buf[8:], c.Lo)
	written, err := w.Write(buf)
	return int64(written), err
}

// ReadFrom populates this characteristics from exactly CharacteristicsSize
// (16) bytes read from r: Hi then Lo, each little-endian. It returns the
// number of bytes consumed.
func (c *Characteristics) ReadFrom(r io.Reader) (int64, error) {
	buf := make([]byte, CharacteristicsSize)
	read, err := io.ReadFull(r, buf)
	n := int64(read)
	if err != nil {
		return n, err
	}
	c.Hi = binary.LittleEndian.Uint64(buf[:8])
	c.Lo = binary.LittleEndian.Uint64(buf[8:])
	return n, nil
}

// IsFullyIndexed specifies whether the index of CARv2 represents a catalog of all CID segments.
// See StoreIdentityCIDs
func (c *Characteristics) IsFullyIndexed() bool {
	return isBitSet(c.Hi, fullyIndexedCharPos)
}

// SetFullyIndexed sets whether the CARv2 index represents a catalog of all CID segments.
func (c *Characteristics) SetFullyIndexed(b bool) {
	if b {
		c.Hi = setBit(c.Hi, fullyIndexedCharPos)
	} else {
		c.Hi = unsetBit(c.Hi, fullyIndexedCharPos)
	}
}
// setBit returns n with the bit at pos forced to 1.
func setBit(n uint64, pos uint) uint64 {
	return n | (1 << pos)
}

// unsetBit returns n with the bit at pos forced to 0.
func unsetBit(n uint64, pos uint) uint64 {
	return n &^ (1 << pos)
}

// isBitSet reports whether the bit at pos in n is 1.
func isBitSet(n uint64, pos uint) bool {
	return n&(1<<pos) != 0
}
// NewHeader instantiates a new CARv2 header, given the data size.
// DataOffset is fixed at PragmaSize+HeaderSize and IndexOffset immediately
// follows the data payload.
func NewHeader(dataSize uint64) Header {
	header := Header{
		DataSize: dataSize,
	}
	header.DataOffset = PragmaSize + HeaderSize
	header.IndexOffset = header.DataOffset + dataSize
	return header
}

// WithIndexPadding sets the index offset from the beginning of the file for this header and returns
// the header for convenient chained calls.
// The index offset is calculated as the sum of PragmaSize, HeaderSize,
// Header.DataSize, and the given padding.
func (h Header) WithIndexPadding(padding uint64) Header {
	h.IndexOffset = h.IndexOffset + padding
	return h
}

// WithDataPadding sets the data payload byte-offset from the beginning of the file for this header
// and returns the header for convenient chained calls.
// The Data offset is calculated as the sum of PragmaSize, HeaderSize and the given padding.
// The call to this function also shifts the Header.IndexOffset forward by the given padding.
func (h Header) WithDataPadding(padding uint64) Header {
	h.DataOffset = PragmaSize + HeaderSize + padding
	h.IndexOffset = h.IndexOffset + padding
	return h
}

// WithDataSize sets the data payload size and returns the header for
// convenient chained calls. The IndexOffset is shifted forward by the same
// amount. NOTE(review): this assumes the header's previous IndexOffset was
// computed with a zero data size (e.g. NewHeader(0)) — confirm at call sites.
func (h Header) WithDataSize(size uint64) Header {
	h.DataSize = size
	h.IndexOffset = size + h.IndexOffset
	return h
}

// HasIndex indicates whether the index is present (a zero IndexOffset means
// no index).
func (h Header) HasIndex() bool {
	return h.IndexOffset != 0
}

// WriteTo serializes this header as bytes and writes them using the given io.Writer.
// Layout: 16-byte Characteristics, then DataOffset, DataSize and IndexOffset
// as little-endian uint64s (HeaderSize bytes in total).
func (h Header) WriteTo(w io.Writer) (n int64, err error) {
	wn, err := h.Characteristics.WriteTo(w)
	n += wn
	if err != nil {
		return
	}
	buf := make([]byte, 24)
	binary.LittleEndian.PutUint64(buf[:8], h.DataOffset)
	binary.LittleEndian.PutUint64(buf[8:16], h.DataSize)
	binary.LittleEndian.PutUint64(buf[16:], h.IndexOffset)
	written, err := w.Write(buf)
	n += int64(written)
	return n, err
}
// ReadFrom populates fields of this header from the given r.
func (h *Header) ReadFrom(r io.Reader) (int64, error) {
n, err := h.Characteristics.ReadFrom(r)
if err != nil {
return n, err
}
buf := make([]byte, 24)
read, err := io.ReadFull(r, buf)
n += int64(read)
if err != nil {
return n, err
}
h.DataOffset = binary.LittleEndian.Uint64(buf[:8])
h.DataSize = binary.LittleEndian.Uint64(buf[8:16])
h.IndexOffset = binary.LittleEndian.Uint64(buf[16:])
return n, nil
} | v2/car.go | 0.657868 | 0.535402 | car.go | starcoder |
package column
import (
"github.com/ryogrid/SamehadaDB/types"
)
// Column describes one column of a table schema: its name, value type,
// storage layout within the tuple, and optional index/expression metadata.
type Column struct {
	columnName string // column name within the schema
	columnType types.TypeID // value type of the column
	fixedLength uint32 // For a non-inlined column, this is the size of a pointer. Otherwise, the size of the fixed length column
	variableLength uint32 // For an inlined column, 0. Otherwise, the length of the variable length column
	columnOffset uint32 // Column offset in the tuple
	hasIndex bool // whether the column has index data
	isLeft bool // when temporal schema, this is used for join
	// should be pointer of subtype of expression.Expression
	// this member is used and needed at temporarily created table (schema) on query execution
	expr_ interface{}
}
// NewColumn builds a column descriptor for the given name and type.
// Non-varchar columns are stored inline with their type's fixed size;
// varchar columns are stored out-of-line as a 4-byte pointer with a 255-byte
// variable part. expr should be a pointer to a subtype of
// expression.Expression.
func NewColumn(name string, columnType types.TypeID, hasIndex bool, expr interface{}) *Column {
	if columnType == types.Varchar {
		return &Column{name, types.Varchar, 4, 255, 0, hasIndex, true, expr}
	}
	return &Column{name, columnType, columnType.Size(), 0, 0, hasIndex, true, expr}
}
// IsInlined reports whether the column's value is stored inline in the tuple
// (true for every type except varchar).
func (c *Column) IsInlined() bool {
	return c.columnType != types.Varchar
}

// GetType returns the column's value type.
func (c *Column) GetType() types.TypeID {
	return c.columnType
}

// GetOffset returns the column's byte offset within the tuple.
func (c *Column) GetOffset() uint32 {
	return c.columnOffset
}

// SetOffset sets the column's byte offset within the tuple.
func (c *Column) SetOffset(offset uint32) {
	c.columnOffset = offset
}

// FixedLength returns the inline size: the type's size for inlined columns,
// or the pointer size for non-inlined (varchar) columns.
func (c *Column) FixedLength() uint32 {
	return c.fixedLength
}

// SetFixedLength sets the inline size of the column.
func (c *Column) SetFixedLength(fixedLength uint32) {
	c.fixedLength = fixedLength
}

// VariableLength returns the out-of-line size (0 for inlined columns).
func (c *Column) VariableLength() uint32 {
	return c.variableLength
}

// SetVariableLength sets the out-of-line size of the column.
func (c *Column) SetVariableLength(variableLength uint32) {
	c.variableLength = variableLength
}

// GetColumnName returns the column's name.
func (c *Column) GetColumnName() string {
	return c.columnName
}

// HasIndex reports whether the column has index data.
func (c *Column) HasIndex() bool {
	return c.hasIndex
}

// SetHasIndex records whether the column has index data.
func (c *Column) SetHasIndex(hasIndex bool) {
	c.hasIndex = hasIndex
}

// IsLeft reports whether, in a temporary join schema, the column comes from
// the left side.
func (c *Column) IsLeft() bool {
	return c.isLeft
}

// SetIsLeft records whether the column comes from the left side of a join.
func (c *Column) SetIsLeft(isLeft bool) {
	c.isLeft = isLeft
}

// returned value should be used with type validation at expression.Expression
func (c *Column) GetExpr() interface{} {
	return c.expr_
}
func (c *Column) SetExpr(expr interface{}) {
c.expr_ = expr
} | storage/table/column/column.go | 0.548432 | 0.438785 | column.go | starcoder |
// We represent flips with an array of 0s and 1s. 1 for heads, and 0 for tails.
package main
import "fmt"
import "math/rand"
import "time"
import "flag"
// create_flips returns a slice of number_of_flips random coin flips,
// where 1 means heads and 0 means tails.
func create_flips(number_of_flips int) []int {
	flips := make([]int, number_of_flips)
	for i := range flips {
		flips[i] = rand.Intn(2) // 1 is heads, 0 is tails
	}
	return flips
}
// streak_found reports whether streak_length consecutive heads begin at
// index flip_number. Callers must guarantee that
// flip_number+streak_length-1 is a valid index.
func streak_found(flips []int, flip_number, streak_length int) bool {
	if flips[flip_number] != 1 {
		return false // starts on tails, cannot be a heads streak
	}
	for i := 1; i < streak_length; i++ {
		if flips[flip_number+i] != 1 {
			return false
		}
	}
	return true
}
// Find a streak by picking random spots in our array of flips and then checking for a streak there. Return as soon as we find one.
// Returns the starting index of a found streak, or -1 on failure. Starting
// indexes are drawn from [0, len(flips)-streak_length), which deliberately
// excludes a streak ending exactly on the last flip: the caller inspects the
// flip immediately after the streak, which must exist.
func find_streak_random(flips []int, streak_length int) (int) {
	if len(flips) <= streak_length { return -1 }
	tries := 2 * len(flips) + 10 // Since it's random we need to give up at some point. This is purely my gut feeling for what is 'enough' tries.
	for try_number := 0; try_number < tries; try_number++ {
		flip_number := rand.Intn(len(flips) - streak_length) // For a 100 flip array and 3 flip streak, this is 100 - 3 = 97 -> 0...96
		if streak_found(flips, flip_number, streak_length) {
			return flip_number
		}
	}
	return -1 // This represents a failure to find any streaks of the desired length in this array of flips
}
// main sweeps streak lengths and flip counts, and for each combination
// estimates how often the flip immediately after a found heads streak is
// itself heads.
func main() {
	max_streak_length := flag.Int("streak", 3, "Number of 'heads' in a row we consider a 'streak'")
	max_number_of_flips := flag.Int("flips", 100, "How many times with flip our coin")
	round_to_perform := flag.Int("rounds", 10000, "A round consists of flipping a coin the desired times, finding 1 streak in it, and then checking the next flip after the streak.")
	flag.Parse()
	// Seed once up front. The original seeded inside the inner loop, which is
	// unnecessary work and can correlate runs that complete within the clock
	// resolution.
	rand.Seed(time.Now().UnixNano())
	for streak_length := 1; streak_length <= *max_streak_length; streak_length++ {
		for number_of_flips := 10; number_of_flips <= *max_number_of_flips; number_of_flips *= 10 {
			count := 0
			completed_rounds := 0 // a completed round is one in which a streak was found
			for round_num := 0; round_num < *round_to_perform; round_num++ {
				flips := create_flips(number_of_flips) // generate our random array of flips
				streak_index := find_streak_random(flips, streak_length)
				if streak_index < 0 {
					continue // no streak found in this array of flips
				}
				completed_rounds++
				// The key test: does the flip just after the streak match it?
				if flips[streak_index] == flips[streak_index+streak_length] {
					count++
				}
			}
			// Guard the division: when no round produced a streak the original
			// printed NaN (0/0); report 0.00% instead.
			percent_continued_streak := 0.0
			if completed_rounds > 0 {
				percent_continued_streak = 100.0 * float64(count) / float64(completed_rounds)
			}
			fmt.Printf("Looking for a streak of length %2d in %5d total flips. Performed %d rounds, and %6d were successful, found %.2f%% continued the streak.\n", streak_length, number_of_flips, *round_to_perform, completed_rounds, percent_continued_streak)
		}
	}
} | flips.go | 0.696475 | 0.479138 | flips.go | starcoder |
package clients
import (
"errors"
"fmt"
"strings"
"github.com/Kamva/octopus/base"
)
// fetchSingleRecord reads the first row of rows into data, keyed by column
// name. If the result set is empty, or the row cannot be scanned, data is
// zeroed and a non-nil error is returned.
func fetchSingleRecord(rows base.SQLRows, data *base.RecordData) error {
	if !rows.Next() {
		data.Zero()
		return errors.New("no result found")
	}
	// Get list of result columns (the error is ignored, as elsewhere in this
	// file — NOTE(review): confirm base.SQLRows.Columns cannot fail here).
	cols, _ := rows.Columns()
	// Scan into interface{} pointers so any column type is accepted.
	columns := make([]interface{}, len(cols))
	columnPointers := make([]interface{}, len(cols))
	for i := range columns {
		columnPointers[i] = &columns[i]
	}
	if err := rows.Scan(columnPointers...); err != nil {
		// Fix: the previous code populated data even when Scan failed,
		// handing callers a garbage record alongside the error.
		data.Zero()
		return err
	}
	// Copy the scanned values into the record, keyed by column name.
	for i, colName := range cols {
		data.Set(colName, columns[i])
	}
	return nil
}
// fetchResults reads every row of rows into a RecordDataSet, one RecordData
// per row keyed by column name.
func fetchResults(rows base.SQLRows) (base.RecordDataSet, error) {
	// Get list of result columns (error ignored, matching fetchSingleRecord).
	cols, _ := rows.Columns()
	resultSet := make(base.RecordDataSet, 0)
	for rows.Next() {
		// Scan into interface{} pointers so any column type is accepted.
		columns := make([]interface{}, len(cols))
		columnPointers := make([]interface{}, len(cols))
		for i := range columns {
			columnPointers[i] = &columns[i]
		}
		if err := rows.Scan(columnPointers...); err != nil {
			return nil, err
		}
		// Build a fresh record per row. The previous implementation reused a
		// single RecordData and zeroed it after each append; if RecordData is
		// backed by a reference type (e.g. a map), the appended copies alias
		// it and Zero() would clobber rows already in the result set.
		data := *base.ZeroRecordData()
		for i, colName := range cols {
			data.Set(colName, columns[i])
		}
		resultSet = append(resultSet, data)
	}
	return resultSet, nil
}
// prepareUpdate renders the record's columns as a comma-separated list of
// "column = value" assignments, quoting each value with the given enquoter.
func prepareUpdate(data base.RecordData, enquoter base.Enquoter) string {
	var b strings.Builder
	for i, col := range data.GetColumns() {
		if i > 0 {
			b.WriteString(", ")
		}
		fmt.Fprintf(&b, "%s = %s", col, enquoter(data.Get(col)))
	}
	return b.String()
}
// queryDB executes the given SQL query string against db and returns the
// resulting rows. It is declared as a package-level variable so tests can
// swap in a mock implementation.
var queryDB = func(db base.SQLDatabase, query string) (base.SQLRows, error) {
	return db.Query(query)
} | clients/helpers.go | 0.579995 | 0.403684 | helpers.go | starcoder |
package knn
import (
"fmt"
"github.com/gonum/matrix/mat64"
"github.com/sjwhitworth/golearn/base"
"github.com/sjwhitworth/golearn/metrics/pairwise"
"github.com/sjwhitworth/golearn/utilities"
)
// A KNNClassifier consists of a data matrix, associated labels in the same order as the matrix, and a distance function.
// The accepted distance functions at this time are 'euclidean' and 'manhattan'.
// Optimisations only occur when attributes are grouped into identical
// AttributeGroups, which don't include the class variable, in the same order.
type KNNClassifier struct {
	base.BaseEstimator
	TrainingData base.FixedDataGrid // training instances retained by Fit
	DistanceFunc string // "euclidean" or "manhattan"
	NearestNeighbours int // k, the number of neighbours consulted per prediction
	AllowOptimisations bool // permit the optimised euclidean path when layouts match
}
// NewKnnClassifier returns a KNNClassifier configured with the given distance
// function name ("euclidean" or "manhattan") and neighbour count, with
// optimisations enabled by default.
func NewKnnClassifier(distfunc string, neighbours int) *KNNClassifier {
	return &KNNClassifier{
		DistanceFunc:       distfunc,
		NearestNeighbours:  neighbours,
		AllowOptimisations: true,
	}
}
// Fit stores the training data for later
func (KNN *KNNClassifier) Fit(trainingData base.FixedDataGrid) {
	// Lazy learner: no model is built here; Predict scans TrainingData directly.
	KNN.TrainingData = trainingData
}
// canUseOptimisations reports whether the optimised prediction path may be
// used for `what`: both grids must be strictly layout-compatible
// DenseInstances, no class attribute may share an AttributeGroup with the
// ordinary data attributes, and every non-class attribute must be a
// FloatAttribute.
func (KNN *KNNClassifier) canUseOptimisations(what base.FixedDataGrid) bool {
	// Check that the two have exactly the same layout
	if !base.CheckStrictlyCompatible(what, KNN.TrainingData) {
		return false
	}
	// Check that the two are DenseInstances
	whatd, ok1 := what.(*base.DenseInstances)
	_, ok2 := KNN.TrainingData.(*base.DenseInstances)
	if !ok1 || !ok2 {
		return false
	}
	// Check that no Class Attributes are mixed in with the data
	classAttrs := whatd.AllClassAttributes()
	normalAttrs := base.NonClassAttributes(whatd)
	// Retrieve all the AGs
	ags := whatd.AllAttributeGroups()
	classAttrGroups := make([]base.AttributeGroup, 0)
	// Collect every AttributeGroup that contains at least one class attribute.
	for agName := range ags {
		ag := ags[agName]
		attrs := ag.Attributes()
		matched := false
		for _, a := range attrs {
			for _, c := range classAttrs {
				if a.Equals(c) {
					matched = true
				}
			}
		}
		if matched {
			classAttrGroups = append(classAttrGroups, ag)
		}
	}
	// A class-bearing group must not also hold any ordinary attribute.
	for _, cag := range classAttrGroups {
		attrs := cag.Attributes()
		common := base.AttributeIntersect(normalAttrs, attrs)
		if len(common) != 0 {
			return false
		}
	}
	// Check that all of the Attributes are numeric
	for _, a := range normalAttrs {
		if _, ok := a.(*base.FloatAttribute); !ok {
			return false
		}
	}
	// If that's fine, return true
	return true
}
// Predict returns a classification for the vector, based on a vector input, using the KNN algorithm.
func (KNN *KNNClassifier) Predict(what base.FixedDataGrid) base.FixedDataGrid {
	// Resolve the configured distance function.
	var distanceFunc pairwise.PairwiseDistanceFunc
	switch KNN.DistanceFunc {
	case "euclidean":
		distanceFunc = pairwise.NewEuclidean()
	case "manhattan":
		distanceFunc = pairwise.NewManhattan()
	default:
		panic("unsupported distance function")
	}
	// Check Compatibility
	allAttrs := base.CheckCompatible(what, KNN.TrainingData)
	if allAttrs == nil {
		// Don't have the same Attributes
		return nil
	}
	// Use the optimised euclidean path when layouts permit it.
	if KNN.AllowOptimisations {
		if KNN.DistanceFunc == "euclidean" {
			if KNN.canUseOptimisations(what) {
				return KNN.optimisedEuclideanPredict(what.(*base.DenseInstances))
			}
		}
	}
	fmt.Println("Optimisations are switched off")
	// Keep only the float attributes; other kinds cannot be measured by the
	// distance functions.
	allNumericAttrs := make([]base.Attribute, 0)
	for _, a := range allAttrs {
		if fAttr, ok := a.(*base.FloatAttribute); ok {
			allNumericAttrs = append(allNumericAttrs, fAttr)
		}
	}
	// Generate return vector
	ret := base.GeneratePredictionVector(what)
	// Resolve Attribute specifications for both
	whatAttrSpecs := base.ResolveAttributes(what, allNumericAttrs)
	trainAttrSpecs := base.ResolveAttributes(KNN.TrainingData, allNumericAttrs)
	// Reserve storage for per-row distances, the voting map, and reusable
	// row buffers.
	distances := make(map[int]float64)
	maxmap := make(map[string]int)
	trainRowBuf := make([]float64, len(allNumericAttrs))
	predRowBuf := make([]float64, len(allNumericAttrs))
	_, maxRow := what.Size()
	curRow := 0
	// Iterate over all outer rows
	what.MapOverRows(whatAttrSpecs, func(predRow [][]byte, predRowNo int) (bool, error) {
		// NOTE(review): curRow%1 is always 0, so this progress line prints on
		// every row; the intended reporting interval should be confirmed.
		if (curRow%1) == 0 && curRow > 0 {
			fmt.Printf("KNN: %.2f %% done\n", float64(curRow)*100.0/float64(maxRow))
		}
		curRow++
		// Read the float values out
		for i := range allNumericAttrs {
			predRowBuf[i] = base.UnpackBytesToFloat(predRow[i])
		}
		predMat := utilities.FloatsToMatrix(predRowBuf)
		// Find the closest match in the training data
		KNN.TrainingData.MapOverRows(trainAttrSpecs, func(trainRow [][]byte, srcRowNo int) (bool, error) {
			// Read the float values out
			for i := range allNumericAttrs {
				trainRowBuf[i] = base.UnpackBytesToFloat(trainRow[i])
			}
			// Compute the distance
			trainMat := utilities.FloatsToMatrix(trainRowBuf)
			distances[srcRowNo] = distanceFunc.Distance(predMat, trainMat)
			return true, nil
		})
		// Vote among the k nearest training rows.
		sorted := utilities.SortIntMap(distances)
		values := sorted[:KNN.NearestNeighbours]
		maxClass := KNN.vote(maxmap, values)
		base.SetClass(ret, predRowNo, maxClass)
		return true, nil
	})
	return ret
}
// vote tallies the class labels of the given training-row indices in maxmap
// (which is cleared first and reused across calls) and returns the label with
// the highest count.
func (KNN *KNNClassifier) vote(maxmap map[string]int, values []int) string {
	// Clear the reusable tally map.
	for label := range maxmap {
		maxmap[label] = 0
	}
	// Count one vote per neighbour (missing keys start at zero).
	for _, rowNo := range values {
		maxmap[base.GetClass(KNN.TrainingData, rowNo)]++
	}
	// Pick the label with the most votes.
	winner, best := "", -1
	for label, votes := range maxmap {
		if votes > best {
			best = votes
			winner = label
		}
	}
	return winner
}
// A KNNRegressor consists of a data matrix, associated result variables in the same order as the matrix, and a name.
type KNNRegressor struct {
	base.BaseEstimator
	Values []float64 // target value for each training row, in row order
	DistanceFunc string // "euclidean" or "manhattan"
}
// NewKnnRegressor mints a new regressor with the given distance function name.
func NewKnnRegressor(distfunc string) *KNNRegressor {
	// Only the distance function is configured here; call Fit to supply data.
	KNN := KNNRegressor{}
	KNN.DistanceFunc = distfunc
	return &KNN
}
// Fit stores the training matrix (rows x cols, row-major in numbers) and the
// per-row target values. Panics with mat64.ErrShape when rows != len(values).
func (KNN *KNNRegressor) Fit(values []float64, numbers []float64, rows int, cols int) {
	if rows != len(values) {
		panic(mat64.ErrShape)
	}
	KNN.Data = mat64.NewDense(rows, cols, numbers)
	KNN.Values = values
}
// Predict returns the mean target value of the K training rows nearest to
// vector under the configured distance function. (The previous version also
// accumulated a `labels` slice that was never read; it has been removed.)
func (KNN *KNNRegressor) Predict(vector *mat64.Dense, K int) float64 {
	// Get the number of rows
	rows, _ := KNN.Data.Dims()
	rownumbers := make(map[int]float64)
	// Check what distance function we are using
	var distanceFunc pairwise.PairwiseDistanceFunc
	switch KNN.DistanceFunc {
	case "euclidean":
		distanceFunc = pairwise.NewEuclidean()
	case "manhattan":
		distanceFunc = pairwise.NewManhattan()
	default:
		panic("unsupported distance function")
	}
	// Distance from every training row to the query vector.
	for i := 0; i < rows; i++ {
		row := KNN.Data.RowView(i)
		rowMat := utilities.FloatsToMatrix(row)
		rownumbers[i] = distanceFunc.Distance(rowMat, vector)
	}
	// Average the targets of the K nearest rows.
	sorted := utilities.SortIntMap(rownumbers)
	values := sorted[:K]
	var sum float64
	for _, elem := range values {
		sum += KNN.Values[elem]
	}
	return sum / float64(K)
} | knn/knn.go | 0.760295 | 0.52275 | knn.go | starcoder |
// Pointer: The pointer is used to points to the first element of the array that is accessible through the slice. Here, it is not necessary that the pointed element is the first element of the array.
// Length: The length is the total number of elements present in the array.
// Capacity: The capacity represents the maximum size up to which it can expand.
//using make() function .... make() function is used to create an empty slice. Here, empty slices are those slices that contain an empty array reference.
// Go program to illustrate how to create slices
// Using make function
package main
import (
"bytes"
"fmt"
"sort"
)
// main demonstrates common slice operations: creation with make, slicing an
// array, slice literals, sorting, multi-dimensional slices, range iteration,
// copy, and byte-slice comparison.
func main() {
	// Creating an array of size 7
	// and slice this array till 4
	// and return the reference of the slice
	// Using make function
	var my_slice_1 = make([]int, 4, 7)
	fmt.Printf("Slice 1 = %v, \nlength = %d, \ncapacity = %d\n",
		my_slice_1, len(my_slice_1), cap(my_slice_1))
	// Creating another array of size 7
	// and return the reference of the slice
	// Using make function
	var my_slice = make([]int, 3)
	fmt.Printf("Slice 2 = %v, \nlength = %d, \ncapacity = %d\n",
		my_slice, len(my_slice), cap(my_slice))
	// Slicing an array... syntax: array_name[low:high]
	// (note: arr has length 4 but only 3 initializers, so arr[3] is "")
	arr := [4]string{"welcome", "to", "Earth!"}
	// Creating slices from the given array
	var myslc = arr[1:2]
	myslc1 := arr[0:]
	fmt.Println("My slice : ", myslc)
	fmt.Println("My slice : ", myslc1)
	// Using a slice literal
	var slice_1 = []string{"hello", "naruto"}
	fmt.Println((slice_1))
	// Sorting a slice in place
	sort.Strings(slice_1)
	fmt.Println("slice 1 after sorting: ", slice_1)
	// Creating multi-dimensional slice
	s1 := [][]int{{12, 34},
		{56, 47},
		{29, 40},
		{46, 78}}
	fmt.Println("slice is: ", s1)
	// Iterate slice
	// using range in for loop
	// without index i.e. using a blank identifier in the for loop:
	for _, ele := range slice_1 {
		fmt.Printf("Element = %s\n", ele)
	}
	// Copying one slice into another ... syntax: func copy(dst, src []Type) int
	// copy stops at the shorter of the two, so only 5 elements are copied here.
	slcc1 := []int{58, 69, 40, 45, 11, 56, 67, 21, 65}
	//var slcc2 []int
	slcc3 := make([]int, 5)
	//slcc4 := []int{78, 50, 67, 77}
	copy_1 := copy(slcc3, slcc1)
	fmt.Println("\nSlice:", slcc3)
	fmt.Println("Total number of elements copied:", copy_1)
	// Comparing two byte slices lexicographically (0 means equal)
	sli_1 := []byte{'I', 'S', 'H', 'A', 'D'}
	sli_2 := []byte{'I', 'S', 'H', 'A', 'A'}
	res := bytes.Compare(sli_1, sli_2)
	if res == 0 {
		fmt.Println("!..Slices are equal..!")
	} else {
		fmt.Println("!..Slice are not equal..!")
	}
} | slices.go | 0.622689 | 0.579757 | slices.go | starcoder |
package dst
// Beta distribution reparametrized using mean (μ) and sample size (ν).
// <NAME>. (2011). Doing Bayesian data analysis: A tutorial with R and BUGS. p. 83: Academic Press / Elsevier. ISBN 978-0123814852.
// BetaμνPDF returns the PDF of the Beta distribution reparametrized using
// mean μ and sample size ν, i.e. Beta(α=μν, β=(1-μ)ν).
func BetaμνPDF(μ, ν float64) func(x float64) float64 {
	return BetaPDF(μ*ν, (1-μ)*ν)
}
// BetaμνLnPDF returns the natural logarithm of the PDF of the Beta
// distribution with mean μ and sample size ν, i.e. Beta(α=μν, β=(1-μ)ν).
func BetaμνLnPDF(μ, ν float64) func(x float64) float64 {
	return BetaLnPDF(μ*ν, (1-μ)*ν)
}
// BetaμνNext returns a random number drawn from the Beta distribution with
// mean μ and sample size ν (α=μν, β=(1-μ)ν). Returns NaN when ν <= 0.
func BetaμνNext(μ, ν float64) float64 {
	α := μ * ν
	β := (1 - μ) * ν
	if ν <= 0 {
		return NaN
	}
	return BetaNext(α, β)
}
// Betaμν returns a random number generator for the Beta distribution with
// mean μ and sample size ν (α=μν, β=(1-μ)ν).
func Betaμν(μ, ν float64) func() float64 {
	α := μ * ν
	β := (1 - μ) * ν
	return func() float64 { return BetaNext(α, β) }
}
// BetaμνPDFAt returns the value of the PDF of the Beta distribution with
// mean μ and sample size ν, at x.
func BetaμνPDFAt(μ, ν, x float64) float64 {
	pdf := BetaμνPDF(μ, ν)
	return pdf(x)
}
// BetaμνCDF returns the CDF of the Beta distribution with mean μ and sample
// size ν (α=μν, β=(1-μ)ν).
func BetaμνCDF(μ, ν float64) func(x float64) float64 {
	α := μ * ν
	β := (1 - μ) * ν
	return BetaCDF(α, β)
}
// BetaμνCDFAt returns the value of CDF of the Beta distribution reparametrized using mean and sample size, at x.
func BetaμνCDFAt(μ, ν, x float64) float64 {
cdf := BetaCDF(μ, ν)
return cdf(x)
}
// BetaμνQtl returns the inverse of the CDF (quantile function) of the Beta
// distribution with mean μ and sample size ν (α=μν, β=(1-μ)ν).
func BetaμνQtl(μ, ν float64) func(p float64) float64 {
	// p: probability for which the quantile is evaluated
	α := μ * ν
	β := (1 - μ) * ν
	return BetaQtl(α, β)
}
// BetaμνQtlFor returns the quantile (inverse CDF) of the Beta distribution
// with mean μ and sample size ν, for the given probability p.
func BetaμνQtlFor(μ, ν, p float64) float64 {
	qtl := BetaμνQtl(μ, ν)
	return qtl(p)
} | dst/beta-mu-nu.go | 0.920115 | 0.657305 | beta-mu-nu.go | starcoder |
package plaid
import (
"encoding/json"
)
// OverrideAccounts Data to use to set values of test accounts. Some values cannot be specified in the schema and will instead will be calculated from other test data in order to achieve more consistent, realistic test data.
type OverrideAccounts struct {
Type OverrideAccountType `json:"type"`
Subtype NullableAccountSubtype `json:"subtype"`
// If provided, the account will start with this amount as the current balance.
StartingBalance float32 `json:"starting_balance"`
// If provided, the account will always have this amount as its available balance, regardless of current balance or changes in transactions over time.
ForceAvailableBalance float32 `json:"force_available_balance"`
// ISO-4217 currency code. If provided, the account will be denominated in the given currency. Transactions will also be in this currency by default.
Currency string `json:"currency"`
Meta Meta `json:"meta"`
Numbers Numbers `json:"numbers"`
// Specify the list of transactions on the account.
Transactions []TransactionOverride `json:"transactions"`
Holdings *HoldingsOverride `json:"holdings,omitempty"`
InvestmentTransactions *InvestmentsTransactionsOverride `json:"investment_transactions,omitempty"`
Identity OwnerOverride `json:"identity"`
Liability LiabilityOverride `json:"liability"`
InflowModel InflowModel `json:"inflow_model"`
Income *IncomeOverride `json:"income,omitempty"`
AdditionalProperties map[string]interface{}
}
type _OverrideAccounts OverrideAccounts
// NewOverrideAccounts instantiates a new OverrideAccounts object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewOverrideAccounts(type_ OverrideAccountType, subtype NullableAccountSubtype, startingBalance float32, forceAvailableBalance float32, currency string, meta Meta, numbers Numbers, transactions []TransactionOverride, identity OwnerOverride, liability LiabilityOverride, inflowModel InflowModel) *OverrideAccounts {
	return &OverrideAccounts{
		Type:                  type_,
		Subtype:               subtype,
		StartingBalance:       startingBalance,
		ForceAvailableBalance: forceAvailableBalance,
		Currency:              currency,
		Meta:                  meta,
		Numbers:               numbers,
		Transactions:          transactions,
		Identity:              identity,
		Liability:             liability,
		InflowModel:           inflowModel,
	}
}
// NewOverrideAccountsWithDefaults instantiates a new OverrideAccounts object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewOverrideAccountsWithDefaults() *OverrideAccounts {
this := OverrideAccounts{}
return &this
}
// GetType returns the Type field value
func (o *OverrideAccounts) GetType() OverrideAccountType {
if o == nil {
var ret OverrideAccountType
return ret
}
return o.Type
}
// GetTypeOk returns a tuple with the Type field value
// and a boolean to check if the value has been set.
func (o *OverrideAccounts) GetTypeOk() (*OverrideAccountType, bool) {
if o == nil {
return nil, false
}
return &o.Type, true
}
// SetType sets field value
func (o *OverrideAccounts) SetType(v OverrideAccountType) {
o.Type = v
}
// GetSubtype returns the Subtype field value
// If the value is explicit nil, the zero value for AccountSubtype will be returned
func (o *OverrideAccounts) GetSubtype() AccountSubtype {
if o == nil || o.Subtype.Get() == nil {
var ret AccountSubtype
return ret
}
return *o.Subtype.Get()
}
// GetSubtypeOk returns a tuple with the Subtype field value
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *OverrideAccounts) GetSubtypeOk() (*AccountSubtype, bool) {
if o == nil {
return nil, false
}
return o.Subtype.Get(), o.Subtype.IsSet()
}
// SetSubtype sets field value
func (o *OverrideAccounts) SetSubtype(v AccountSubtype) {
o.Subtype.Set(&v)
}
// GetStartingBalance returns the StartingBalance field value
func (o *OverrideAccounts) GetStartingBalance() float32 {
if o == nil {
var ret float32
return ret
}
return o.StartingBalance
}
// GetStartingBalanceOk returns a tuple with the StartingBalance field value
// and a boolean to check if the value has been set.
func (o *OverrideAccounts) GetStartingBalanceOk() (*float32, bool) {
if o == nil {
return nil, false
}
return &o.StartingBalance, true
}
// SetStartingBalance sets field value
func (o *OverrideAccounts) SetStartingBalance(v float32) {
o.StartingBalance = v
}
// GetForceAvailableBalance returns the ForceAvailableBalance field value
func (o *OverrideAccounts) GetForceAvailableBalance() float32 {
if o == nil {
var ret float32
return ret
}
return o.ForceAvailableBalance
}
// GetForceAvailableBalanceOk returns a tuple with the ForceAvailableBalance field value
// and a boolean to check if the value has been set.
func (o *OverrideAccounts) GetForceAvailableBalanceOk() (*float32, bool) {
if o == nil {
return nil, false
}
return &o.ForceAvailableBalance, true
}
// SetForceAvailableBalance sets field value
func (o *OverrideAccounts) SetForceAvailableBalance(v float32) {
o.ForceAvailableBalance = v
}
// GetCurrency returns the Currency field value
func (o *OverrideAccounts) GetCurrency() string {
if o == nil {
var ret string
return ret
}
return o.Currency
}
// GetCurrencyOk returns a tuple with the Currency field value
// and a boolean to check if the value has been set.
func (o *OverrideAccounts) GetCurrencyOk() (*string, bool) {
if o == nil {
return nil, false
}
return &o.Currency, true
}
// SetCurrency sets field value
func (o *OverrideAccounts) SetCurrency(v string) {
o.Currency = v
}
// GetMeta returns the Meta field value
func (o *OverrideAccounts) GetMeta() Meta {
if o == nil {
var ret Meta
return ret
}
return o.Meta
}
// GetMetaOk returns a tuple with the Meta field value
// and a boolean to check if the value has been set.
func (o *OverrideAccounts) GetMetaOk() (*Meta, bool) {
if o == nil {
return nil, false
}
return &o.Meta, true
}
// SetMeta sets field value
func (o *OverrideAccounts) SetMeta(v Meta) {
o.Meta = v
}
// GetNumbers returns the Numbers field value
func (o *OverrideAccounts) GetNumbers() Numbers {
if o == nil {
var ret Numbers
return ret
}
return o.Numbers
}
// GetNumbersOk returns a tuple with the Numbers field value
// and a boolean to check if the value has been set.
func (o *OverrideAccounts) GetNumbersOk() (*Numbers, bool) {
if o == nil {
return nil, false
}
return &o.Numbers, true
}
// SetNumbers sets field value
func (o *OverrideAccounts) SetNumbers(v Numbers) {
o.Numbers = v
}
// GetTransactions returns the Transactions field value
func (o *OverrideAccounts) GetTransactions() []TransactionOverride {
if o == nil {
var ret []TransactionOverride
return ret
}
return o.Transactions
}
// GetTransactionsOk returns a tuple with the Transactions field value
// and a boolean to check if the value has been set.
func (o *OverrideAccounts) GetTransactionsOk() (*[]TransactionOverride, bool) {
if o == nil {
return nil, false
}
return &o.Transactions, true
}
// SetTransactions sets field value
func (o *OverrideAccounts) SetTransactions(v []TransactionOverride) {
o.Transactions = v
}
// GetHoldings returns the Holdings field value if set, zero value otherwise.
func (o *OverrideAccounts) GetHoldings() HoldingsOverride {
if o == nil || o.Holdings == nil {
var ret HoldingsOverride
return ret
}
return *o.Holdings
}
// GetHoldingsOk returns a tuple with the Holdings field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *OverrideAccounts) GetHoldingsOk() (*HoldingsOverride, bool) {
if o == nil || o.Holdings == nil {
return nil, false
}
return o.Holdings, true
}
// HasHoldings returns a boolean if a field has been set.
func (o *OverrideAccounts) HasHoldings() bool {
if o != nil && o.Holdings != nil {
return true
}
return false
}
// SetHoldings gets a reference to the given HoldingsOverride and assigns it to the Holdings field.
func (o *OverrideAccounts) SetHoldings(v HoldingsOverride) {
o.Holdings = &v
}
// GetInvestmentTransactions returns the InvestmentTransactions field value if set, zero value otherwise.
func (o *OverrideAccounts) GetInvestmentTransactions() InvestmentsTransactionsOverride {
if o == nil || o.InvestmentTransactions == nil {
var ret InvestmentsTransactionsOverride
return ret
}
return *o.InvestmentTransactions
}
// GetInvestmentTransactionsOk returns a tuple with the InvestmentTransactions field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *OverrideAccounts) GetInvestmentTransactionsOk() (*InvestmentsTransactionsOverride, bool) {
if o == nil || o.InvestmentTransactions == nil {
return nil, false
}
return o.InvestmentTransactions, true
}
// HasInvestmentTransactions returns a boolean if a field has been set.
func (o *OverrideAccounts) HasInvestmentTransactions() bool {
if o != nil && o.InvestmentTransactions != nil {
return true
}
return false
}
// SetInvestmentTransactions gets a reference to the given InvestmentsTransactionsOverride and assigns it to the InvestmentTransactions field.
func (o *OverrideAccounts) SetInvestmentTransactions(v InvestmentsTransactionsOverride) {
o.InvestmentTransactions = &v
}
// GetIdentity returns the Identity field value
func (o *OverrideAccounts) GetIdentity() OwnerOverride {
if o == nil {
var ret OwnerOverride
return ret
}
return o.Identity
}
// GetIdentityOk returns a tuple with the Identity field value
// and a boolean to check if the value has been set.
func (o *OverrideAccounts) GetIdentityOk() (*OwnerOverride, bool) {
if o == nil {
return nil, false
}
return &o.Identity, true
}
// SetIdentity sets field value
func (o *OverrideAccounts) SetIdentity(v OwnerOverride) {
o.Identity = v
}
// GetLiability returns the Liability field value
func (o *OverrideAccounts) GetLiability() LiabilityOverride {
if o == nil {
var ret LiabilityOverride
return ret
}
return o.Liability
}
// GetLiabilityOk returns a tuple with the Liability field value
// and a boolean to check if the value has been set.
func (o *OverrideAccounts) GetLiabilityOk() (*LiabilityOverride, bool) {
if o == nil {
return nil, false
}
return &o.Liability, true
}
// SetLiability sets field value
func (o *OverrideAccounts) SetLiability(v LiabilityOverride) {
o.Liability = v
}
// GetInflowModel returns the InflowModel field value
func (o *OverrideAccounts) GetInflowModel() InflowModel {
if o == nil {
var ret InflowModel
return ret
}
return o.InflowModel
}
// GetInflowModelOk returns a tuple with the InflowModel field value
// and a boolean to check if the value has been set.
func (o *OverrideAccounts) GetInflowModelOk() (*InflowModel, bool) {
if o == nil {
return nil, false
}
return &o.InflowModel, true
}
// SetInflowModel sets field value
func (o *OverrideAccounts) SetInflowModel(v InflowModel) {
o.InflowModel = v
}
// GetIncome returns the Income field value if set, zero value otherwise.
func (o *OverrideAccounts) GetIncome() IncomeOverride {
if o == nil || o.Income == nil {
var ret IncomeOverride
return ret
}
return *o.Income
}
// GetIncomeOk returns a tuple with the Income field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *OverrideAccounts) GetIncomeOk() (*IncomeOverride, bool) {
if o == nil || o.Income == nil {
return nil, false
}
return o.Income, true
}
// HasIncome returns a boolean if a field has been set.
func (o *OverrideAccounts) HasIncome() bool {
if o != nil && o.Income != nil {
return true
}
return false
}
// SetIncome gets a reference to the given IncomeOverride and assigns it to the Income field.
func (o *OverrideAccounts) SetIncome(v IncomeOverride) {
o.Income = &v
}
// MarshalJSON serializes OverrideAccounts: required fields are always
// emitted, optional pointer fields only when non-nil, and any additional
// (unknown) properties captured at unmarshal time are appended.
func (o OverrideAccounts) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	// Required fields (the generated always-true `if true` guards have been
	// folded away; behavior is unchanged).
	toSerialize["type"] = o.Type
	toSerialize["subtype"] = o.Subtype.Get()
	toSerialize["starting_balance"] = o.StartingBalance
	toSerialize["force_available_balance"] = o.ForceAvailableBalance
	toSerialize["currency"] = o.Currency
	toSerialize["meta"] = o.Meta
	toSerialize["numbers"] = o.Numbers
	toSerialize["transactions"] = o.Transactions
	toSerialize["identity"] = o.Identity
	toSerialize["liability"] = o.Liability
	toSerialize["inflow_model"] = o.InflowModel
	// Optional fields are emitted only when present.
	if o.Holdings != nil {
		toSerialize["holdings"] = o.Holdings
	}
	if o.InvestmentTransactions != nil {
		toSerialize["investment_transactions"] = o.InvestmentTransactions
	}
	if o.Income != nil {
		toSerialize["income"] = o.Income
	}
	// Round-trip unknown properties.
	for key, value := range o.AdditionalProperties {
		toSerialize[key] = value
	}
	return json.Marshal(toSerialize)
}
// UnmarshalJSON deserializes OverrideAccounts, capturing any properties not
// declared on the struct into AdditionalProperties.
func (o *OverrideAccounts) UnmarshalJSON(bytes []byte) (err error) {
	varOverrideAccounts := _OverrideAccounts{}
	if err = json.Unmarshal(bytes, &varOverrideAccounts); err != nil {
		// Bug fix: the previous code fell through to the generic map
		// unmarshal below even on failure; if that second unmarshal
		// succeeded it reset err to nil and silently returned a zero-valued
		// struct.
		return err
	}
	*o = OverrideAccounts(varOverrideAccounts)
	additionalProperties := make(map[string]interface{})
	if err = json.Unmarshal(bytes, &additionalProperties); err == nil {
		// Strip the known fields so only genuinely unknown keys remain.
		delete(additionalProperties, "type")
		delete(additionalProperties, "subtype")
		delete(additionalProperties, "starting_balance")
		delete(additionalProperties, "force_available_balance")
		delete(additionalProperties, "currency")
		delete(additionalProperties, "meta")
		delete(additionalProperties, "numbers")
		delete(additionalProperties, "transactions")
		delete(additionalProperties, "holdings")
		delete(additionalProperties, "investment_transactions")
		delete(additionalProperties, "identity")
		delete(additionalProperties, "liability")
		delete(additionalProperties, "inflow_model")
		delete(additionalProperties, "income")
		o.AdditionalProperties = additionalProperties
	}
	return err
}
// NullableOverrideAccounts wraps an *OverrideAccounts together with an isSet
// flag so that JSON null, absent, and present values can be distinguished.
type NullableOverrideAccounts struct {
	value *OverrideAccounts
	isSet bool
}
// Get returns the wrapped value (may be nil).
func (v NullableOverrideAccounts) Get() *OverrideAccounts {
	return v.value
}
// Set stores val and marks the wrapper as set.
func (v *NullableOverrideAccounts) Set(val *OverrideAccounts) {
	v.value = val
	v.isSet = true
}
// IsSet reports whether a value (possibly nil, i.e. JSON null) was assigned.
func (v NullableOverrideAccounts) IsSet() bool {
	return v.isSet
}
// Unset clears the value and the set flag.
func (v *NullableOverrideAccounts) Unset() {
	v.value = nil
	v.isSet = false
}
// NewNullableOverrideAccounts wraps val in a set NullableOverrideAccounts.
func NewNullableOverrideAccounts(val *OverrideAccounts) *NullableOverrideAccounts {
	return &NullableOverrideAccounts{value: val, isSet: true}
}
// MarshalJSON emits the wrapped value (null when nil).
func (v NullableOverrideAccounts) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
// UnmarshalJSON records that a value was present and decodes it.
func (v *NullableOverrideAccounts) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
} | plaid/model_override_accounts.go | 0.830078 | 0.452173 | model_override_accounts.go | starcoder |
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// NodeObservabilityRunSpec defines the desired state of NodeObservabilityRun
type NodeObservabilityRunSpec struct {
// NodeObservabilityRef is the reference to the parent NodeObservability resource
NodeObservabilityRef *NodeObservabilityRef `json:"nodeObservabilityRef"`
}
// NodeObservabilityRef is the reference to the parent NodeObservability resource
type NodeObservabilityRef struct {
// Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names
Name string `json:"name"`
}
// NodeObservabilityRunStatus defines the observed state of NodeObservabilityRun
type NodeObservabilityRunStatus struct {
// StartTimestamp represents the server time when the NodeObservabilityRun started.
// When not set, the NodeObservabilityRun hasn't started.
// It is represented in RFC3339 form and is in UTC.
StartTimestamp *metav1.Time `json:"startTimestamp,omitempty"`
// FinishedTimestamp represents the server time when the NodeObservabilityRun finished.
// When not set, the NodeObservabilityRun isn't known to have finished.
// It is represented in RFC3339 form and is in UTC.
FinishedTimestamp *metav1.Time `json:"finishedTimestamp,omitempty"`
// Agents represents the list of Nodes that are included in this Run.
// Agents are Pods, and as such, not all are always ready/available
Agents []AgentNode `json:"agents,omitempty"`
// FailedAgents represents the list of Nodes that could not be included in this Run
// This could be due to Node/Pod/Network failure
FailedAgents []AgentNode `json:"failedAgents,omitempty"`
// Conditions contain details for aspects of the current state of this API Resource.
ConditionalStatus `json:"conditions,omitempty"`
// Output is the output location of this NodeObservabilityRun
// When not set, no output location is known
Output *string `json:"output,omitempty"`
}
// AgentNode identifies a single agent endpoint taking part in a run
// (agents are Pods; see NodeObservabilityRunStatus).
type AgentNode struct {
	Name string `json:"name,omitempty"` // agent name
	IP string `json:"ip,omitempty"` // agent pod IP
	Port int32 `json:"port,omitempty"` // agent port
}
// +kubebuilder:printcolumn:JSONPath=".spec.nodeObservabilityRef.name", name="NodeObservabilityRef", type="string"
//+kubebuilder:resource:shortName=nobr
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// NodeObservabilityRun is a request to run observability actions on the
// nodes previously selected in NodeObservability resource
type NodeObservabilityRun struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec NodeObservabilityRunSpec `json:"spec,omitempty"`
Status NodeObservabilityRunStatus `json:"status,omitempty"`
}
//+kubebuilder:object:root=true
// NodeObservabilityRunList contains a list of NodeObservabilityRun
type NodeObservabilityRunList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []NodeObservabilityRun `json:"items"`
}
func init() {
SchemeBuilder.Register(&NodeObservabilityRun{}, &NodeObservabilityRunList{})
} | api/v1alpha1/nodeobservabilityrun_types.go | 0.85129 | 0.42322 | nodeobservabilityrun_types.go | starcoder |
package tart
// The Chande momentum oscillator is a technical momentum
// indicator introduced by Tushar Chande in his 1994 book
// The New Technical Trader. The formula calculates the
// difference between the sum of recent gains and the sum
// of recent losses and then divides the result by the sum
// of all price movements over the same period.
// https://www.investopedia.com/terms/c/chandemomentumoscillator.asp
// https://www.fidelity.com/learning-center/trading-investing/technical-analysis/technical-indicator-guide/cmo
type Cmo struct {
	n          int64   // look-back period
	initPeriod int64   // warm-up samples needed before Valid() reports true
	su         *Ema    // smoothed average of upward price moves
	sd         *Ema    // smoothed average of downward price moves
	prevC      float64 // previous input value, used to compute the next diff
	sz         int64   // number of samples seen so far
}
// NewCmo returns a Chande momentum oscillator with an n-period
// look-back, backed by two exponential moving averages: one tracking
// upward moves, one tracking downward moves.
func NewCmo(n int64) *Cmo {
	alpha := 1.0 / float64(n)
	up := NewEma(n, alpha)
	down := NewEma(n, alpha)

	// The warm-up period is the longer of the two EMAs' warm-ups.
	warmup := up.InitPeriod()
	if p := down.InitPeriod(); p > warmup {
		warmup = p
	}

	return &Cmo{
		n:          n,
		initPeriod: warmup,
		su:         up,
		sd:         down,
		prevC:      0,
		sz:         0,
	}
}
// Update feeds the next value `v` into the oscillator and returns the
// current CMO reading, scaled to [-100, 100]. The very first sample
// only seeds the previous-value state and returns 0 (the underlying
// EMAs are not updated with the meaningless first diff).
func (c *Cmo) Update(v float64) float64 {
	c.sz++
	d := v - c.prevC
	c.prevC = v
	if c.sz == 1 {
		// No previous value to diff against yet.
		return 0
	}
	var asu, asd float64
	if d > 0 {
		// Upward move: feed the "up" average, decay the "down" average.
		asu = c.su.Update(d)
		asd = c.sd.Update(0)
	} else {
		// Downward (or flat) move: mirror of the above; -d >= 0.
		asu = c.su.Update(0)
		asd = c.sd.Update(-d)
	}
	sum := asu + asd
	if almostZero(sum) {
		// No net movement: avoid dividing by (near) zero.
		return 0
	}
	return (asu - asd) / sum * 100.0
}
// InitPeriod returns the number of warm-up samples required before the
// oscillator's output is considered meaningful.
func (c *Cmo) InitPeriod() int64 {
	return c.initPeriod
}

// Valid reports whether more than InitPeriod samples have been seen,
// i.e. whether Update's return value is meaningful.
func (c *Cmo) Valid() bool {
	return c.sz > c.initPeriod
}
// The Chande momentum oscillator is a technical momentum
// indicator introduced by <NAME> in his 1994 book
// The New Technical Trader. The formula calculates the
// difference between the sum of recent gains and the sum
// of recent losses and then divides the result by the sum
// of all price movements over the same period.
// https://www.investopedia.com/terms/c/chandemomentumoscillator.asp
// https://www.fidelity.com/learning-center/trading-investing/technical-analysis/technical-indicator-guide/cmo
func CmoArr(in []float64, n int64) []float64 {
out := make([]float64, len(in))
c := NewCmo(n)
for i, v := range in {
out[i] = c.Update(v)
}
return out
} | cmo.go | 0.718496 | 0.431405 | cmo.go | starcoder |
package avltree
// removeData carries the search key and the comparison function through
// the recursive removal walk.
type removeData struct {
	lookingFor interface{} // value being removed
	compare    CompareFunc // three-way comparator (<0, 0, >0)
}

// findPredecessor returns the in-order predecessor of node, i.e. the
// right-most node of its left subtree. It returns nil when node is nil
// or has no left child.
func findPredecessor(node *treeNode) *treeNode {
	if node != nil {
		pred := node.left
		if pred != nil {
			for pred.right != nil {
				pred = pred.right
			}
		}
		return pred
	}
	return nil
}
// remLeftSubBalance rebalances after node's left subtree was shortened
// while node was already right-high (the right side is now two levels
// taller). It rotates as needed and returns the new subtree root plus
// whether the subtree as a whole became shorter.
func remLeftSubBalance(node *treeNode, shorter bool) (*treeNode, bool) {
	q := node.right // q: root of taller subtree
	var w *treeNode
	switch q.bal {
	case equal:
		node.bal = rightHigh
		q.bal = leftHigh // q will be the new root node
		node = rotateLeft(node)
		shorter = false // next level not shorter
	case rightHigh:
		node.bal = equal
		q.bal = equal // q will be the new root node
		node = rotateLeft(node)
	case leftHigh:
		// Double rotation: rotate q right, then node left.
		w = q.left
		if w.bal == leftHigh {
			q.bal = rightHigh
		} else {
			q.bal = equal
		}
		if w.bal == rightHigh {
			node.bal = leftHigh
		} else {
			node.bal = equal
		}
		w.bal = equal // w will be the new root node
		q = rotateRight(q)
		node.right = q
		node = rotateLeft(node)
	}
	return node, shorter
}

// remRightSubBalance is the mirror image of remLeftSubBalance: it
// rebalances after node's right subtree was shortened while node was
// already left-high.
func remRightSubBalance(node *treeNode, shorter bool) (*treeNode, bool) {
	q := node.left // q: root of taller subtree
	var w *treeNode
	switch q.bal {
	case equal:
		node.bal = leftHigh
		q.bal = rightHigh // q will be the new root node
		node = rotateRight(node)
		shorter = false // next level not shorter
	case leftHigh:
		node.bal = equal
		q.bal = equal // q will be the new root node
		node = rotateRight(node)
	case rightHigh:
		// Double rotation: rotate q left, then node right.
		w = q.right
		if w.bal == rightHigh {
			q.bal = leftHigh
		} else {
			q.bal = equal
		}
		if w.bal == leftHigh {
			node.bal = rightHigh
		} else {
			node.bal = equal
		}
		w.bal = equal // w will be the new root node
		q = rotateLeft(q)
		node.left = q
		node = rotateRight(node)
	}
	return node, shorter
}
// removePredecessor descends to the right-most node of the given
// subtree (the in-order predecessor), splices it out, and rebalances on
// the way back up. It returns the new subtree root and whether the
// subtree shrank in height.
func removePredecessor(node *treeNode, shorter bool) (*treeNode, bool) {
	if node.right != nil {
		node.right, shorter = removePredecessor(node.right, shorter)
		if shorter { // right subtree was shortened
			node, shorter = remRightBalance(node, shorter)
		}
		node.size = node.leftSize() + node.rightSize() + 1
	} else {
		// Right-most node found: replace it with its (only possible) child.
		node = remNode(node)
	}
	return node, shorter
}

// remLeftBalance adjusts node's balance factor after its left subtree
// was shortened, delegating to remLeftSubBalance when a rotation is
// required.
func remLeftBalance(node *treeNode, shorter bool) (*treeNode, bool) {
	switch node.bal {
	case equal: // one subtree shortened
		node.bal = rightHigh // now it's right high
		shorter = false      // overall tree same height
	case leftHigh: // taller subtree shortened
		node.bal = equal // now it's equal
	case rightHigh: // shorter subtree shortened
		node, shorter = remLeftSubBalance(node, shorter)
	}
	return node, shorter
}

// remRightBalance adjusts node's balance factor after its right subtree
// was shortened, delegating to remRightSubBalance when a rotation is
// required.
func remRightBalance(node *treeNode, shorter bool) (*treeNode, bool) {
	switch node.bal {
	case equal: // one subtree shortened
		node.bal = leftHigh // now it's left high
		shorter = false     // overall tree same height
	case rightHigh: // taller subtree shortened
		node.bal = equal // now it's equal
	case leftHigh: // shorter subtree shortened
		node, shorter = remRightSubBalance(node, shorter)
	}
	return node, shorter
}

// remNode removes a node that has at most one child by returning that
// single child (or nil for a leaf); the caller re-links the result in
// the node's place.
func remNode(node *treeNode) *treeNode {
	if node.left != nil {
		node = node.left
	} else if node.right != nil {
		node = node.right
	} else {
		node = nil
	}
	return node
}
// remove searches the subtree rooted at *node for d.lookingFor, removes
// the matching node, rebalances on the way back up, and keeps the
// cached subtree sizes current. It returns the removed value, or nil
// when no element matched.
func (d *removeData) remove(node **treeNode, shorter *bool) interface{} {
	*shorter = true // default: shorter
	var ptr interface{}
	ptr = nil
	code := d.compare(d.lookingFor, (*node).value)
	if code < 0 {
		if (*node).left != nil {
			ptr = d.remove(&((*node).left), shorter)
			if *shorter && ptr != nil { // left subtree was shortened
				*node, *shorter = remLeftBalance(*node, *shorter)
			}
		}
	} else if code > 0 {
		if (*node).right != nil {
			ptr = d.remove(&((*node).right), shorter)
			if *shorter && ptr != nil { // right subtree was shortened
				*node, *shorter = remRightBalance(*node, *shorter)
			}
		}
	} else {
		ptr = (*node).value
		if (*node).left != nil && (*node).right != nil { // do the switch to find the prev.
			// Two children: overwrite this node's value with the in-order
			// predecessor's value, then delete the predecessor (which has
			// at most one subtree) from the left subtree.
			pred := findPredecessor(*node)
			(*node).value = pred.value
			(*node).left, *shorter = removePredecessor((*node).left, *shorter)
			if *shorter { // left subtree was shortened
				*node, *shorter = remLeftBalance(*node, *shorter)
			}
		} else { // we found the node; it has 1 subtree
			*node = remNode(*node)
		}
	}
	if ptr != nil && *node != nil {
		(*node).size = (*node).leftSize() + (*node).rightSize() + 1
	}
	return ptr
}

// Remove removes the element matching the given value.
// It returns the removed value, or nil when ptr is nil, the tree is
// empty, or no element matches.
func (t *Tree) Remove(ptr interface{}) interface{} {
	if ptr != nil && t.root != nil {
		d := &removeData{ptr, t.compare}
		var shorter bool
		return d.remove(&(t.root), &shorter)
	}
	return nil
}
// remove deletes the element at the given 0-based in-order position
// from the subtree rooted at *node, using the cached subtree sizes to
// steer the descent, and rebalances on the way back up. It returns the
// removed value.
func remove(node **treeNode, index int, shorter *bool) interface{} {
	*shorter = true // default: shorter
	var ptr interface{}
	ptr = nil
	if index < (*node).leftSize() {
		if (*node).left != nil {
			ptr = remove(&((*node).left), index, shorter)
			if *shorter && ptr != nil { // left subtree was shortened
				*node, *shorter = remLeftBalance(*node, *shorter)
			}
		}
	} else if index == (*node).leftSize() {
		ptr = (*node).value
		if (*node).left != nil && (*node).right != nil { // do the switch to find the prev.
			// node with only one subtree
			pred := findPredecessor(*node)
			(*node).value = pred.value
			(*node).left, *shorter = removePredecessor((*node).left, *shorter)
			if *shorter { // left subtree was shortened
				*node, *shorter = remLeftBalance(*node, *shorter)
			}
		} else { // we found the node; it has 1 subtree
			*node = remNode(*node)
		}
	} else {
		if (*node).right != nil {
			// Skip the left subtree and this node when recursing right.
			ptr = remove(&((*node).right), index-((*node).leftSize()+1), shorter)
			if *shorter && ptr != nil { // right subtree was shortened
				*node, *shorter = remRightBalance(*node, *shorter)
			}
		}
	}
	if ptr != nil && *node != nil {
		(*node).size = (*node).leftSize() + (*node).rightSize() + 1
	}
	return ptr
}

// RemoveAt removes the element at the given index.
// It returns the removed value, or nil when the index is out of range.
func (t *Tree) RemoveAt(index int) interface{} {
	if t.root != nil && index < t.root.size && index >= 0 {
		var shorter bool
		return remove(&(t.root), index, &shorter)
	}
	return nil
}
package nvm
import (
"math"
"strconv"
)
// FloatEpsilon represents difference between 1 and the least value greater than 1 that is representable.
var FloatEpsilon = 1e-8
// IsNEqual reports whether floats `x` and `y` are equal.
// `NaN != NaN`, `(-)Inf != (-)Inf`
func IsNEqual(x, y float64) bool {
if math.Abs(x-y) <= FloatEpsilon {
return true
}
return false
}
// NaN returns the IEEE 754 "Not-a-Number" value.
func NaN() float64 {
	return math.NaN()
}

// IsNaN reports whether `f` is a "Not-a-Number" value.
func IsNaN(f float64) bool {
	return math.IsNaN(f)
}

// PosInf returns positive infinity.
func PosInf() float64 {
	return math.Inf(1)
}

// NegInf returns negative infinity.
func NegInf() float64 {
	return -math.Inf(1)
}

// IsInf reports whether `f` is positive or negative infinity.
func IsInf(f float64) bool {
	return math.IsInf(f, 1) || math.IsInf(f, -1)
}
// NtoS converts the floating-point number `f` to its shortest exact
// string form: plain decimal (-ddd.dddd) for moderate magnitudes, or a
// decimal exponent (-d.dddde±dd) for large ones.
func NtoS(f float64) string {
	return strconv.FormatFloat(f, 'g', -1, 64)
}

// NtoS2 converts `f` to a plain decimal string (-ddd.dddd, no
// exponent) with exactly `d` digits after the decimal point.
func NtoS2(f float64, d int) string {
	return strconv.FormatFloat(f, 'f', d, 64)
}

// NtoSci converts `f` to scientific notation (-d.dddde±dd) using the
// fewest digits that still round-trip.
func NtoSci(f float64) string {
	return strconv.FormatFloat(f, 'e', -1, 64)
}

// NtoSci2 converts `f` to scientific notation with exactly `d` digits
// after the decimal point.
func NtoSci2(f float64, d int) string {
	return strconv.FormatFloat(f, 'e', d, 64)
}

// StoN parses the string `s` as a float64; accepted forms include
// -ddd.dddd, -d.dddde±dd, Inf, -Inf and NaN. Any parse failure yields
// NaN.
func StoN(s string) float64 {
	if f, err := strconv.ParseFloat(s, 64); err == nil {
		return f
	}
	return math.NaN()
}
// NtoICeil returns the least integer value greater than or equal to
// `f` (rounds toward +Inf).
func NtoICeil(f float64) float64 {
	return math.Ceil(f)
}

// NtoIFloor returns the greatest integer value less than or equal to
// `f` (rounds toward -Inf).
func NtoIFloor(f float64) float64 {
	return math.Floor(f)
}

// NtoI returns the integer part of `f`, truncating toward zero.
func NtoI(f float64) float64 {
	// math.Trunc rounds toward zero, which is exactly Ceil for
	// negative values and Floor for non-negative ones.
	return math.Trunc(f)
}

// NtoIRound returns the nearest integer, rounding halves away from
// zero (math.Round semantics), e.g. NtoIRound(-1.5) == -2.
func NtoIRound(f float64) float64 {
	return math.Round(f)
}
package enums
import (
"fmt"
"io"
"strconv"
)
// FilterSortDataType defines the various Filter data types
type FilterSortDataType string

// Note: the constant values should match the table field name
const (
	// FilterSortDataTypeCreatedAt represents created at Filter data type
	FilterSortDataTypeCreatedAt FilterSortDataType = "created"
	// FilterSortDataTypeUpdatedAt represents updated at Filter data type
	FilterSortDataTypeUpdatedAt FilterSortDataType = "updated"
	// FilterSortDataTypeName represents a Name Filter data type
	FilterSortDataTypeName FilterSortDataType = "name"
	// FilterSortDataTypeMFLCode represents an MFL Code Filter data type
	FilterSortDataTypeMFLCode FilterSortDataType = "mfl_code"
	// FilterSortDataTypeActive represents the Active Filter data type
	FilterSortDataTypeActive FilterSortDataType = "active"
	// FilterSortDataTypeCounty represents the County Filter data type
	FilterSortDataTypeCounty FilterSortDataType = "county"
	// Other Filter data Types
)

// FacilityFilterDataTypes represents a slice of all possible `FilterDataTypes` values
var FacilityFilterDataTypes = []FilterSortDataType{
	FilterSortDataTypeName,
	FilterSortDataTypeMFLCode,
	FilterSortDataTypeActive,
	FilterSortDataTypeCounty,
}

// FacilitySortDataTypes represents a slice of all possible `SortDataTypes` values
var FacilitySortDataTypes = []FilterSortDataType{
	FilterSortDataTypeCreatedAt,
	FilterSortDataTypeUpdatedAt,
	FilterSortDataTypeName,
	FilterSortDataTypeMFLCode,
	FilterSortDataTypeActive,
	FilterSortDataTypeCounty,
}

// IsValid reports whether e is one of the known filter/sort data types.
func (e FilterSortDataType) IsValid() bool {
	switch e {
	case FilterSortDataTypeCreatedAt,
		FilterSortDataTypeUpdatedAt,
		FilterSortDataTypeName,
		FilterSortDataTypeMFLCode,
		FilterSortDataTypeActive,
		FilterSortDataTypeCounty:
		return true
	default:
		return false
	}
}

// String returns the underlying string value of the data type.
func (e FilterSortDataType) String() string {
	return string(e)
}

// UnmarshalGQL converts the supplied value to a filter data type.
// Only string inputs holding a valid data type are accepted.
func (e *FilterSortDataType) UnmarshalGQL(v interface{}) error {
	str, ok := v.(string)
	if !ok {
		return fmt.Errorf("enums must be strings")
	}

	*e = FilterSortDataType(str)
	if !e.IsValid() {
		return fmt.Errorf("%s is not a valid FilterSortDataType", str)
	}
	return nil
}

// MarshalGQL writes the data type, quoted, to the supplied writer.
func (e FilterSortDataType) MarshalGQL(w io.Writer) {
	fmt.Fprintf(w, "%q", e.String())
}
package nodes
/*
Package nodes provides information and interaction with the nodes API
resource in the OpenStack Bare Metal service.
// Example to List Nodes with Detail
nodes.ListDetail(client, nodes.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
nodeList, err := nodes.ExtractNodes(page)
if err != nil {
return false, err
}
for _, n := range nodeList {
// Do something
}
return true, nil
})
// Example to List Nodes
nodes.List(client, nodes.ListOpts{
ProvisionState: Deploying,
Fields: []string{"name"},
}).EachPage(func(page pagination.Page) (bool, error) {
nodeList, err := nodes.ExtractNodes(page)
if err != nil {
return false, err
}
for _, n := range nodeList {
// Do something
}
return true, nil
})
// Example to Create Node
createNode, err := nodes.Create(client, nodes.CreateOpts{
Driver: "ipmi",
BootInterface: "pxe",
Name: "coconuts",
DriverInfo: map[string]interface{}{
"ipmi_port": "6230",
"ipmi_username": "admin",
"deploy_kernel": "http://172.22.0.1/images/tinyipa-stable-rocky.vmlinuz",
"ipmi_address": "192.168.122.1",
"deploy_ramdisk": "http://172.22.0.1/images/tinyipa-stable-rocky.gz",
"ipmi_password": "<PASSWORD>",
},
}).Extract()
if err != nil {
panic(err)
}
// Example to Get Node
showNode, err := nodes.Get(client, "c9afd385-5d89-4ecb-9e1c-68194da6b474").Extract()
if err != nil {
panic(err)
}
// Example to Update Node
updateNode, err := nodes.Update(client, "c9afd385-5d89-4ecb-9e1c-68194da6b474", nodes.UpdateOpts{
nodes.UpdateOperation{
Op: ReplaceOp,
Path: "/maintenance",
Value: "true",
},
}).Extract()
if err != nil {
panic(err)
}
// Example to Delete Node
err = nodes.Delete(client, "c9afd385-5d89-4ecb-9e1c-68194da6b474").ExtractErr()
if err != nil {
panic(err)
}
// Example to Validate Node
validation, err := nodes.Validate(client, "a62b8495-52e2-407b-b3cb-62775d04c2b8").Extract()
// Example to inject non-masking interrupts
err := nodes.InjectNMI(client, "a62b8495-52e2-407b-b3cb-62775d04c2b8").ExtractErr()
// Example to get array of supported boot devices for a node
bootDevices, err := nodes.GetSupportedBootDevices(client, "a62b8495-52e2-407b-b3cb-62775d04c2b8").Extract()
// Example to set boot device for a node
err := nodes.SetBootDevice(client, "a62b8495-52e2-407b-b3cb-62775d04c2b8", nodes.BootDeviceOpts{
BootDevice: "pxe",
Persistent: false,
})
// Example to get boot device for a node
bootDevice, err := nodes.GetBootDevice(client, "a62b8495-52e2-407b-b3cb-62775d04c2b8").Extract()
*/ | pkg/terraform/exec/plugins/vendor/github.com/gophercloud/gophercloud/openstack/baremetal/v1/nodes/doc.go | 0.697712 | 0.557123 | doc.go | starcoder |
package dungeon
import (
"fmt"
"geometry"
"math"
"math/rand"
)
// Room is a rectangular room in the dungeon, described by its size,
// its top-left and bottom-right corners, and the symbols placed inside
// it.
type Room struct {
	Width, Height        float64
	TopLeft, BottomRight geometry.Point
	locs                 []Loc
}

// NewRoom creates a Room with the given top-left corner (x, y) and the
// specified width and height.
func NewRoom(x, y, width, height int) *Room {
	return &Room{
		Width:  float64(width),
		Height: float64(height),
		// Keyed fields keep the literals valid (and vet-clean) if
		// geometry.Point ever grows additional fields.
		TopLeft:     geometry.Point{X: float64(x), Y: float64(y)},
		BottomRight: geometry.Point{X: float64(x + width), Y: float64(y + height)},
		locs:        []Loc{},
	}
}
// Center calculates the center point of the Room.
func (r *Room) Center() geometry.Point {
	diff := r.BottomRight.Diff(r.TopLeft)
	return geometry.Point{X: r.TopLeft.X + diff.X/2, Y: r.TopLeft.Y + diff.Y/2}
}

// Point implements the geometry.Pointer interface; a Room is located
// at its center.
func (r *Room) Point() geometry.Point {
	return r.Center()
}

// IsInside reports whether a geometry.Pointer lies inside the Room
// (borders inclusive).
func (r *Room) IsInside(p geometry.Pointer) bool {
	point := p.Point()
	return r.TopLeft.X <= point.X &&
		r.BottomRight.X >= point.X &&
		r.TopLeft.Y <= point.Y &&
		r.BottomRight.Y >= point.Y
}
// Overlap reports whether another room, grown by `spacing` on every
// side, overlaps this Room.
func (r *Room) Overlap(o *Room, spacing float64) bool {
	return r.TopLeft.X-spacing <= o.BottomRight.X &&
		r.BottomRight.X+spacing >= o.TopLeft.X &&
		r.TopLeft.Y-spacing <= o.BottomRight.Y &&
		r.BottomRight.Y+spacing >= o.TopLeft.Y
}

// PlaceLoc places the symbol c at a random position inside the Room.
func (r *Room) PlaceLoc(c rune) {
	r.locs = append(r.locs, Loc{r.randomPoint(), c})
}

// randomPoint picks a random integer-grid point inside the Room.
// rand.Intn panics for n <= 0, so degenerate (zero-sized) rooms
// collapse to the top-left corner instead of panicking.
func (r *Room) randomPoint() geometry.Pointer {
	var dx, dy int
	if w := int(r.Width); w > 0 {
		dx = rand.Intn(w)
	}
	if h := int(r.Height); h > 0 {
		dy = rand.Intn(h)
	}
	return geometry.Point{
		X: r.TopLeft.X + float64(dx),
		Y: r.TopLeft.Y + float64(dy),
	}
}
func (r *Room) String() string {
diff := r.TopLeft.Diff(r.BottomRight)
return fmt.Sprintf("%v (%vx%v)", r.TopLeft, math.Abs(float64(diff.X)), math.Abs(float64(diff.Y)))
}
// Draw writes the Room to a buffer
func (r *Room) Draw(b [][]rune) {
for col := r.TopLeft.Y; col < r.BottomRight.Y; col++ {
for row := r.TopLeft.X; row < r.BottomRight.X; row++ {
b[int(col)][int(row)] = empty
}
}
for _, l := range r.locs {
p := l.Point()
b[int(p.Y)][int(p.X)] = l.Symbol
}
}
type Loc struct {
geometry.Pointer
Symbol rune
} | src/dungeon/room.go | 0.80969 | 0.471527 | room.go | starcoder |
package iso20022
// Cash movements from or to a fund as a result of investment funds transactions, eg, subscriptions or redemptions.
// Fields map one-to-one onto the ISO 20022 XML elements named in the tags;
// optional elements carry the `omitempty` option.
type EstimatedFundCashForecast1 struct {
	// Date and, if required, the time, at which the price has been applied.
	TradeDateTime *DateAndDateTimeChoice `xml:"TradDtTm"`
	// Previous date and time at which a price was applied.
	PreviousTradeDateTime *DateAndDateTimeChoice `xml:"PrvsTradDtTm"`
	// Investment fund class to which a cash flow is related.
	FinancialInstrumentDetails *FinancialInstrument5 `xml:"FinInstrmDtls"`
	// Estimated total value of all the holdings, less the fund's liabilities, attributable to a specific investment fund class.
	EstimatedTotalNAV *ActiveOrHistoricCurrencyAndAmount `xml:"EstmtdTtlNAV,omitempty"`
	// Previous estimated value of all the holdings, less the fund's liabilities, attributable to a specific investment fund class.
	PreviousEstimatedTotalNAV *ActiveOrHistoricCurrencyAndAmount `xml:"PrvsEstmtdTtlNAV,omitempty"`
	// Estimated total number of investment fund class units that have been issued.
	EstimatedTotalUnitsNumber *FinancialInstrumentQuantity1 `xml:"EstmtdTtlUnitsNb,omitempty"`
	// Previous estimated value of all the holdings, less the fund's liabilities, attributable to a specific investment fund class.
	PreviousEstimatedTotalUnitsNumber *FinancialInstrumentQuantity1 `xml:"PrvsEstmtdTtlUnitsNb,omitempty"`
	// Rate of change of the net asset value.
	EstimatedTotalNAVChangeRate *PercentageRate `xml:"EstmtdTtlNAVChngRate,omitempty"`
	// Currency of the investment fund class.
	InvestmentCurrency []*ActiveOrHistoricCurrencyCode `xml:"InvstmtCcy,omitempty"`
	// Indicates whether the estimated net cash flow is exceptional.
	ExceptionalNetCashFlowIndicator *YesNoIndicator `xml:"XcptnlNetCshFlowInd"`
	// Cash movements into a fund as a result of investment funds transactions, eg, subscriptions or switch-in.
	EstimatedCashInForecastDetails []*CashInForecast2 `xml:"EstmtdCshInFcstDtls,omitempty"`
	// Cash movements out of a fund as a result of investment funds transactions, eg, redemptions or switch-out.
	EstimatedCashOutForecastDetails []*CashOutForecast2 `xml:"EstmtdCshOutFcstDtls,omitempty"`
	// Net cash movements to a fund as a result of investment funds transactions.
	EstimatedNetCashForecastDetails []*NetCashForecast1 `xml:"EstmtdNetCshFcstDtls,omitempty"`
}
// AddTradeDateTime allocates the TradeDateTime choice and returns it
// for further population.
func (e *EstimatedFundCashForecast1) AddTradeDateTime() *DateAndDateTimeChoice {
	e.TradeDateTime = new(DateAndDateTimeChoice)
	return e.TradeDateTime
}

// AddPreviousTradeDateTime allocates the PreviousTradeDateTime choice
// and returns it for further population.
func (e *EstimatedFundCashForecast1) AddPreviousTradeDateTime() *DateAndDateTimeChoice {
	e.PreviousTradeDateTime = new(DateAndDateTimeChoice)
	return e.PreviousTradeDateTime
}

// AddFinancialInstrumentDetails allocates the
// FinancialInstrumentDetails element and returns it.
func (e *EstimatedFundCashForecast1) AddFinancialInstrumentDetails() *FinancialInstrument5 {
	e.FinancialInstrumentDetails = new(FinancialInstrument5)
	return e.FinancialInstrumentDetails
}

// SetEstimatedTotalNAV sets the estimated total NAV amount and its currency.
func (e *EstimatedFundCashForecast1) SetEstimatedTotalNAV(value, currency string) {
	e.EstimatedTotalNAV = NewActiveOrHistoricCurrencyAndAmount(value, currency)
}

// SetPreviousEstimatedTotalNAV sets the previous estimated total NAV
// amount and its currency.
func (e *EstimatedFundCashForecast1) SetPreviousEstimatedTotalNAV(value, currency string) {
	e.PreviousEstimatedTotalNAV = NewActiveOrHistoricCurrencyAndAmount(value, currency)
}

// AddEstimatedTotalUnitsNumber allocates the EstimatedTotalUnitsNumber
// element and returns it.
func (e *EstimatedFundCashForecast1) AddEstimatedTotalUnitsNumber() *FinancialInstrumentQuantity1 {
	e.EstimatedTotalUnitsNumber = new(FinancialInstrumentQuantity1)
	return e.EstimatedTotalUnitsNumber
}

// AddPreviousEstimatedTotalUnitsNumber allocates the
// PreviousEstimatedTotalUnitsNumber element and returns it.
func (e *EstimatedFundCashForecast1) AddPreviousEstimatedTotalUnitsNumber() *FinancialInstrumentQuantity1 {
	e.PreviousEstimatedTotalUnitsNumber = new(FinancialInstrumentQuantity1)
	return e.PreviousEstimatedTotalUnitsNumber
}

// SetEstimatedTotalNAVChangeRate sets the NAV change rate from its
// string representation.
func (e *EstimatedFundCashForecast1) SetEstimatedTotalNAVChangeRate(value string) {
	e.EstimatedTotalNAVChangeRate = (*PercentageRate)(&value)
}

// AddInvestmentCurrency appends a currency code to the
// InvestmentCurrency list.
func (e *EstimatedFundCashForecast1) AddInvestmentCurrency(value string) {
	e.InvestmentCurrency = append(e.InvestmentCurrency, (*ActiveOrHistoricCurrencyCode)(&value))
}

// SetExceptionalNetCashFlowIndicator sets the yes/no indicator from
// its string representation.
func (e *EstimatedFundCashForecast1) SetExceptionalNetCashFlowIndicator(value string) {
	e.ExceptionalNetCashFlowIndicator = (*YesNoIndicator)(&value)
}

// AddEstimatedCashInForecastDetails appends a new cash-in forecast
// entry and returns it for further population.
func (e *EstimatedFundCashForecast1) AddEstimatedCashInForecastDetails() *CashInForecast2 {
	newValue := new(CashInForecast2)
	e.EstimatedCashInForecastDetails = append(e.EstimatedCashInForecastDetails, newValue)
	return newValue
}

// AddEstimatedCashOutForecastDetails appends a new cash-out forecast
// entry and returns it for further population.
func (e *EstimatedFundCashForecast1) AddEstimatedCashOutForecastDetails() *CashOutForecast2 {
	newValue := new(CashOutForecast2)
	e.EstimatedCashOutForecastDetails = append(e.EstimatedCashOutForecastDetails, newValue)
	return newValue
}

// AddEstimatedNetCashForecastDetails appends a new net cash forecast
// entry and returns it for further population.
func (e *EstimatedFundCashForecast1) AddEstimatedNetCashForecastDetails() *NetCashForecast1 {
	newValue := new(NetCashForecast1)
	e.EstimatedNetCashForecastDetails = append(e.EstimatedNetCashForecastDetails, newValue)
	return newValue
}
package crypto
// encrypt.go contains functions for encrypting and decrypting data byte slices
// and readers.
import (
"crypto/cipher"
"encoding/json"
"errors"
"io"
"github.com/NebulousLabs/fastrand"
"golang.org/x/crypto/twofish"
)
const (
	// TwofishOverhead is the number of bytes added by EncryptBytes
	// (the 12-byte GCM nonce plus the 16-byte GCM authentication tag).
	TwofishOverhead = 28
)

var (
	// ErrInsufficientLen is an error when supplied ciphertext is not
	// long enough to contain a nonce.
	ErrInsufficientLen = errors.New("supplied ciphertext is not long enough to contain a nonce")
)

type (
	// Ciphertext is an encrypted []byte.
	Ciphertext []byte
	// TwofishKey is a key used for encrypting and decrypting data.
	TwofishKey [EntropySize]byte
)

// GenerateTwofishKey produces a key that can be used for encrypting and
// decrypting files. The key bytes are filled from the fastrand source.
func GenerateTwofishKey() (key TwofishKey) {
	fastrand.Read(key[:])
	return
}

// NewCipher creates a new Twofish cipher from the key.
func (key TwofishKey) NewCipher() cipher.Block {
	// NOTE: NewCipher only returns an error if len(key) != 16, 24, or 32.
	cipher, _ := twofish.NewCipher(key[:])
	return cipher
}
// EncryptBytes encrypts a []byte using the key. EncryptBytes uses GCM and
// prepends the nonce (12 bytes) to the ciphertext. The result is
// TwofishOverhead bytes longer than the plaintext.
func (key TwofishKey) EncryptBytes(plaintext []byte) Ciphertext {
	// Create the cipher.
	// NOTE: NewGCM only returns an error if twofishCipher.BlockSize != 16.
	aead, _ := cipher.NewGCM(key.NewCipher())
	// Create the nonce.
	nonce := fastrand.Bytes(aead.NonceSize())
	// Encrypt the data. No authenticated data is provided, as EncryptBytes is
	// meant for file encryption.
	// Passing nonce as Seal's dst makes it append the sealed data after
	// the nonce, producing nonce||ciphertext||tag.
	return aead.Seal(nonce, nonce, plaintext, nil)
}

// DecryptBytes decrypts the ciphertext created by EncryptBytes. The nonce is
// expected to be the first 12 bytes of the ciphertext.
func (key TwofishKey) DecryptBytes(ct Ciphertext) ([]byte, error) {
	// Create the cipher.
	// NOTE: NewGCM only returns an error if twofishCipher.BlockSize != 16.
	aead, _ := cipher.NewGCM(key.NewCipher())
	// Check for a nonce.
	if len(ct) < aead.NonceSize() {
		return nil, ErrInsufficientLen
	}
	// Decrypt the data.
	nonce := ct[:aead.NonceSize()]
	ciphertext := ct[aead.NonceSize():]
	return aead.Open(nil, nonce, ciphertext, nil)
}

// DecryptBytesInPlace decrypts the ciphertext created by EncryptBytes. The
// nonce is expected to be the first 12 bytes of the ciphertext.
// DecryptBytesInPlace reuses the memory of ct to be able to operate in-place.
// This means that ct can't be reused after calling DecryptBytesInPlace.
func (key TwofishKey) DecryptBytesInPlace(ct Ciphertext) ([]byte, error) {
	// Create the cipher.
	// NOTE: NewGCM only returns an error if twofishCipher.BlockSize != 16.
	aead, _ := cipher.NewGCM(key.NewCipher())
	// Check for a nonce.
	if len(ct) < aead.NonceSize() {
		return nil, ErrInsufficientLen
	}
	// Decrypt the data.
	// Open writes the plaintext into ciphertext[:0], overwriting ct.
	nonce := ct[:aead.NonceSize()]
	ciphertext := ct[aead.NonceSize():]
	return aead.Open(ciphertext[:0], nonce, ciphertext, nil)
}
// NewWriter returns a writer that encrypts or decrypts its input stream.
// (OFB mode is a symmetric XOR stream, so the same construction serves
// both directions.)
func (key TwofishKey) NewWriter(w io.Writer) io.Writer {
	// OK to use a zero IV if the key is unique for each ciphertext.
	iv := make([]byte, twofish.BlockSize)
	stream := cipher.NewOFB(key.NewCipher(), iv)
	return &cipher.StreamWriter{S: stream, W: w}
}

// NewReader returns a reader that encrypts or decrypts its input stream.
// (OFB mode is a symmetric XOR stream, so the same construction serves
// both directions.)
func (key TwofishKey) NewReader(r io.Reader) io.Reader {
	// OK to use a zero IV if the key is unique for each ciphertext.
	iv := make([]byte, twofish.BlockSize)
	stream := cipher.NewOFB(key.NewCipher(), iv)
	return &cipher.StreamReader{S: stream, R: r}
}
// MarshalJSON returns the JSON encoding of a CipherText
func (c Ciphertext) MarshalJSON() ([]byte, error) {
return json.Marshal([]byte(c))
}
// UnmarshalJSON parses the JSON-encoded b and returns an instance of
// CipherText.
func (c *Ciphertext) UnmarshalJSON(b []byte) error {
var umarB []byte
err := json.Unmarshal(b, &umarB)
if err != nil {
return err
}
*c = Ciphertext(umarB)
return nil
} | crypto/encrypt.go | 0.747984 | 0.447943 | encrypt.go | starcoder |
package emath
// MinInt returns the smallest of the given int values.
// It panics when called with no arguments.
func MinInt(nums ...int) int {
	m := nums[0]
	for _, v := range nums[1:] {
		if v < m {
			m = v
		}
	}
	return m
}

// MaxInt returns the largest of the given int values.
// It panics when called with no arguments.
func MaxInt(nums ...int) int {
	m := nums[0]
	for _, v := range nums[1:] {
		if v > m {
			m = v
		}
	}
	return m
}

// MinByte returns the smallest of the given byte values.
// It panics when called with no arguments.
func MinByte(nums ...byte) byte {
	m := nums[0]
	for _, v := range nums[1:] {
		if v < m {
			m = v
		}
	}
	return m
}

// MaxByte returns the largest of the given byte values.
// It panics when called with no arguments.
func MaxByte(nums ...byte) byte {
	m := nums[0]
	for _, v := range nums[1:] {
		if v > m {
			m = v
		}
	}
	return m
}

// MinInt8 returns the smallest of the given int8 values.
// It panics when called with no arguments.
func MinInt8(nums ...int8) int8 {
	m := nums[0]
	for _, v := range nums[1:] {
		if v < m {
			m = v
		}
	}
	return m
}

// MaxInt8 returns the largest of the given int8 values.
// It panics when called with no arguments.
func MaxInt8(nums ...int8) int8 {
	m := nums[0]
	for _, v := range nums[1:] {
		if v > m {
			m = v
		}
	}
	return m
}

// MinUint8 returns the smallest of the given uint8 values.
// It panics when called with no arguments.
func MinUint8(nums ...uint8) uint8 {
	m := nums[0]
	for _, v := range nums[1:] {
		if v < m {
			m = v
		}
	}
	return m
}

// MaxUint8 returns the largest of the given uint8 values.
// It panics when called with no arguments.
func MaxUint8(nums ...uint8) uint8 {
	m := nums[0]
	for _, v := range nums[1:] {
		if v > m {
			m = v
		}
	}
	return m
}

// MinInt16 returns the smallest of the given int16 values.
// It panics when called with no arguments.
func MinInt16(nums ...int16) int16 {
	m := nums[0]
	for _, v := range nums[1:] {
		if v < m {
			m = v
		}
	}
	return m
}

// MaxInt16 returns the largest of the given int16 values.
// It panics when called with no arguments.
func MaxInt16(nums ...int16) int16 {
	m := nums[0]
	for _, v := range nums[1:] {
		if v > m {
			m = v
		}
	}
	return m
}

// MinUint16 returns the smallest of the given uint16 values.
// It panics when called with no arguments.
func MinUint16(nums ...uint16) uint16 {
	m := nums[0]
	for _, v := range nums[1:] {
		if v < m {
			m = v
		}
	}
	return m
}

// MaxUint16 returns the largest of the given uint16 values.
// It panics when called with no arguments.
func MaxUint16(nums ...uint16) uint16 {
	m := nums[0]
	for _, v := range nums[1:] {
		if v > m {
			m = v
		}
	}
	return m
}
// MinInt32 find and return the minimum item from the specified int32
func MinInt32(nums ...int32) int32 {
var ret = nums[0]
for i, l := 1, len(nums); i < l; i++ {
if ret > nums[i] {
ret = nums[i]
}
}
return ret
}
// MaxInt32 find and return the maximum item from the specified int32
func MaxInt32(nums ...int32) int32 {
var ret = nums[0]
for i, l := 1, len(nums); i < l; i++ {
if ret < nums[i] {
ret = nums[i]
}
}
return ret
}
// MinUint32 find and return the minimum item from the specified int32
func MinUint32(nums ...uint32) uint32 {
var ret = nums[0]
for i, l := 1, len(nums); i < l; i++ {
if ret > nums[i] {
ret = nums[i]
}
}
return ret
}
// MaxUint32 find and return the maximum item from the specified uint32
func MaxUint32(nums ...uint32) uint32 {
var ret = nums[0]
for i, l := 1, len(nums); i < l; i++ {
if ret < nums[i] {
ret = nums[i]
}
}
return ret
}
// MinInt64 find and return the minimum item from the specified uint32
func MinInt64(nums ...int64) int64 {
var ret = nums[0]
for i, l := 1, len(nums); i < l; i++ {
if ret > nums[i] {
ret = nums[i]
}
}
return ret
}
// MaxInt64 find and return the maximum item from the specified int64
func MaxInt64(nums ...int64) int64 {
var ret = nums[0]
for i, l := 1, len(nums); i < l; i++ {
if ret < nums[i] {
ret = nums[i]
}
}
return ret
}
// MinUint64 find and return the minimum item from the specified uint64
func MinUint64(nums ...uint64) uint64 {
var ret = nums[0]
for i, l := 1, len(nums); i < l; i++ {
if ret > nums[i] {
ret = nums[i]
}
}
return ret
}
// MaxUint64 find and return the maximum item from the specified uint64
func MaxUint64(nums ...uint64) uint64 {
var ret = nums[0]
for i, l := 1, len(nums); i < l; i++ {
if ret < nums[i] {
ret = nums[i]
}
}
return ret
}
// MinFloat32 returns the smallest of the given float32 values.
// It panics if called with no arguments.
func MinFloat32(nums ...float32) float32 {
	best := nums[0]
	for _, n := range nums[1:] {
		if n < best {
			best = n
		}
	}
	return best
}
// MaxFloat32 returns the largest of the given float32 values.
// It panics if called with no arguments.
func MaxFloat32(nums ...float32) float32 {
	best := nums[0]
	for _, n := range nums[1:] {
		if n > best {
			best = n
		}
	}
	return best
}
// MinFloat64 returns the smallest of the given float64 values.
// It panics if called with no arguments.
func MinFloat64(nums ...float64) float64 {
	best := nums[0]
	for _, n := range nums[1:] {
		if n < best {
			best = n
		}
	}
	return best
}
// MaxFloat64 returns the largest of the given float64 values.
// It panics if called with no arguments.
func MaxFloat64(nums ...float64) float64 {
	best := nums[0]
	for _, n := range nums[1:] {
		if n > best {
			best = n
		}
	}
	return best
}
package find_all_anagrams_in_a_string
/*
242. 有效的字母异位词 https://leetcode-cn.com/problems/valid-anagram
给定两个字符串 s 和 t ,编写一个函数来判断 t 是否是 s 的字母异位词。
示例 1:
输入: s = "anagram", t = "nagaram"
输出: true
示例 2:
输入: s = "rat", t = "car"
输出: false
说明:
你可以假设字符串只包含小写字母。
进阶:
如果输入字符串包含 unicode 字符怎么办?你能否调整你的解法来应对这种情况?
*/
/*
Count the occurrences of each letter in s and t separately (a fixed
26-slot array suffices because the input is lowercase ASCII), then
compare the two tallies; equal tallies mean the strings are anagrams.
*/
func isAnagram(s string, t string) bool {
	var countS, countT [26]int
	for _, r := range s {
		countS[r-'a']++
	}
	for _, r := range t {
		countT[r-'a']++
	}
	return countS == countT
}
/*
Single-tally variant: increment a shared counter for every character of
s and decrement it for every character of t. Any entry left non-zero
afterwards means one string has a character the other lacks (positive:
extra in s, negative: extra in t), so the strings are not anagrams.
*/
func isAnagram1(s string, t string) bool {
	var delta [26]int
	for _, r := range s {
		delta[r-'a']++
	}
	for _, r := range t {
		delta[r-'a']--
	}
	for _, d := range delta {
		if d != 0 {
			return false
		}
	}
	return true
}
/*
438. 找到字符串中所有字母异位词 https://leetcode-cn.com/problems/find-all-anagrams-in-a-string
给定一个字符串 s 和一个非空字符串 p,找到 s 中所有是 p 的字母异位词的子串,返回这些子串的起始索引。
字符串只包含小写英文字母,并且字符串 s 和 p 的长度都不超过 20100。
说明:
字母异位词指字母相同,但排列不同的字符串。
不考虑答案输出的顺序。
示例 1:
输入:
s: "cbaebabacd" p: "abc"
输出:
[0, 6]
解释:
起始索引等于 0 的子串是 "cba", 它是 "abc" 的字母异位词。
起始索引等于 6 的子串是 "bac", 它是 "abc" 的字母异位词。
示例 2:
输入:
s: "abab" p: "ab"
输出:
[0, 1, 2]
解释:
起始索引等于 0 的子串是 "ab", 它是 "ab" 的字母异位词。
起始索引等于 1 的子串是 "ba", 它是 "ab" 的字母异位词。
起始索引等于 2 的子串是 "ab", 它是 "ab" 的字母异位词。
*/
/*
Two-pointer sliding window over s.

needed counts each character of p. The window [left, right] grows to the
right; matched tracks how many distinct characters currently meet their
required count. Whenever every requirement is met, the window is shrunk
from the left, and each window of exactly len(p) characters seen in that
state is an anagram start index.
*/
func findAnagrams(s string, p string) []int {
	m, n := len(s), len(p)
	if m < n {
		return nil
	}
	// Character requirements taken from p (sized for the worst case of
	// n distinct characters; the previous size hint of 0 was a no-op).
	needed := make(map[byte]int, n)
	for i := 0; i < n; i++ {
		needed[p[i]]++
	}
	found := make(map[byte]int, len(needed))
	matched := 0 // distinct characters whose required count is met
	var res []int
	for left, right := 0, 0; right < m; right++ {
		c := s[right]
		if needed[c] > 0 {
			found[c]++
			if found[c] == needed[c] {
				matched++
			}
		}
		// All requirements met: record window hits, advance left edge.
		for matched == len(needed) {
			if right-left == n-1 {
				res = append(res, left)
			}
			c := s[left]
			if needed[c] > 0 {
				found[c]--
				if found[c] < needed[c] {
					matched--
				}
			}
			left++
		}
	}
	return res
}
package gonpy
import (
"encoding/binary"
"fmt"
)
// GetComplex128 returns the array data as a slice of complex128 values.
// It fails unless the reader's dtype is "c16".
func (rdr *NpyReader) GetComplex128() ([]complex128, error) {
	if rdr.Dtype != "c16" {
		return nil, fmt.Errorf("Reader does not contain complex128 data")
	}
	data := make([]complex128, rdr.nElt)
	if err := binary.Read(rdr.r, rdr.Endian, &data); err != nil {
		return nil, err
	}
	return data, nil
}
// GetComplex64 returns the array data as a slice of complex64 values.
// It fails unless the reader's dtype is "c8".
func (rdr *NpyReader) GetComplex64() ([]complex64, error) {
	if rdr.Dtype != "c8" {
		return nil, fmt.Errorf("Reader does not contain complex64 data")
	}
	data := make([]complex64, rdr.nElt)
	if err := binary.Read(rdr.r, rdr.Endian, &data); err != nil {
		return nil, err
	}
	return data, nil
}
// GetFloat64 returns the array data as a slice of float64 values.
// It fails unless the reader's dtype is "f8".
func (rdr *NpyReader) GetFloat64() ([]float64, error) {
	if rdr.Dtype != "f8" {
		return nil, fmt.Errorf("Reader does not contain float64 data")
	}
	data := make([]float64, rdr.nElt)
	if err := binary.Read(rdr.r, rdr.Endian, &data); err != nil {
		return nil, err
	}
	return data, nil
}
// GetFloat32 returns the array data as a slice of float32 values.
// It fails unless the reader's dtype is "f4".
func (rdr *NpyReader) GetFloat32() ([]float32, error) {
	if rdr.Dtype != "f4" {
		return nil, fmt.Errorf("Reader does not contain float32 data")
	}
	data := make([]float32, rdr.nElt)
	if err := binary.Read(rdr.r, rdr.Endian, &data); err != nil {
		return nil, err
	}
	return data, nil
}
// GetUint64 returns the array data as a slice of uint64 values.
// It fails unless the reader's dtype is "u8".
func (rdr *NpyReader) GetUint64() ([]uint64, error) {
	if rdr.Dtype != "u8" {
		return nil, fmt.Errorf("Reader does not contain uint64 data")
	}
	data := make([]uint64, rdr.nElt)
	if err := binary.Read(rdr.r, rdr.Endian, &data); err != nil {
		return nil, err
	}
	return data, nil
}
// GetUint32 returns the array data as a slice of uint32 values.
// It fails unless the reader's dtype is "u4".
func (rdr *NpyReader) GetUint32() ([]uint32, error) {
	if rdr.Dtype != "u4" {
		return nil, fmt.Errorf("Reader does not contain uint32 data")
	}
	data := make([]uint32, rdr.nElt)
	if err := binary.Read(rdr.r, rdr.Endian, &data); err != nil {
		return nil, err
	}
	return data, nil
}
// GetUint16 returns the array data as a slice of uint16 values.
// It fails unless the reader's dtype is "u2".
func (rdr *NpyReader) GetUint16() ([]uint16, error) {
	if rdr.Dtype != "u2" {
		return nil, fmt.Errorf("Reader does not contain uint16 data")
	}
	data := make([]uint16, rdr.nElt)
	if err := binary.Read(rdr.r, rdr.Endian, &data); err != nil {
		return nil, err
	}
	return data, nil
}
// GetUint8 returns the array data as a slice of uint8 values.
// It fails unless the reader's dtype is "u1".
func (rdr *NpyReader) GetUint8() ([]uint8, error) {
	if rdr.Dtype != "u1" {
		return nil, fmt.Errorf("Reader does not contain uint8 data")
	}
	data := make([]uint8, rdr.nElt)
	if err := binary.Read(rdr.r, rdr.Endian, &data); err != nil {
		return nil, err
	}
	return data, nil
}
// GetInt64 returns the array data as a slice of int64 values.
// It fails unless the reader's dtype is "i8".
func (rdr *NpyReader) GetInt64() ([]int64, error) {
	if rdr.Dtype != "i8" {
		return nil, fmt.Errorf("Reader does not contain int64 data")
	}
	data := make([]int64, rdr.nElt)
	if err := binary.Read(rdr.r, rdr.Endian, &data); err != nil {
		return nil, err
	}
	return data, nil
}
// GetInt32 returns the array data as a slice of int32 values.
// It fails unless the reader's dtype is "i4".
func (rdr *NpyReader) GetInt32() ([]int32, error) {
	if rdr.Dtype != "i4" {
		return nil, fmt.Errorf("Reader does not contain int32 data")
	}
	data := make([]int32, rdr.nElt)
	if err := binary.Read(rdr.r, rdr.Endian, &data); err != nil {
		return nil, err
	}
	return data, nil
}
// GetInt16 returns the array data as a slice of int16 values.
// It fails unless the reader's dtype is "i2".
func (rdr *NpyReader) GetInt16() ([]int16, error) {
	if rdr.Dtype != "i2" {
		return nil, fmt.Errorf("Reader does not contain int16 data")
	}
	data := make([]int16, rdr.nElt)
	if err := binary.Read(rdr.r, rdr.Endian, &data); err != nil {
		return nil, err
	}
	return data, nil
}
// GetInt8 returns the array data as a slice of int8 values.
// It fails unless the reader's dtype is "i1".
func (rdr *NpyReader) GetInt8() ([]int8, error) {
	if rdr.Dtype != "i1" {
		return nil, fmt.Errorf("Reader does not contain int8 data")
	}
	data := make([]int8, rdr.nElt)
	if err := binary.Read(rdr.r, rdr.Endian, &data); err != nil {
		return nil, err
	}
	return data, nil
}
// WriteComplex128 writes a slice of complex128 values in npy format
// ("c16") and closes the underlying writer.
// The Close error is now returned instead of being silently discarded,
// so callers see deferred flush/close failures.
func (wtr *NpyWriter) WriteComplex128(data []complex128) error {
	if err := wtr.writeHeader("c16", len(data)); err != nil {
		return err
	}
	if err := binary.Write(wtr.w, wtr.Endian, data); err != nil {
		return err
	}
	return wtr.w.Close()
}
// WriteComplex64 writes a slice of complex64 values in npy format
// ("c8") and closes the underlying writer.
// The Close error is now returned instead of being silently discarded,
// so callers see deferred flush/close failures.
func (wtr *NpyWriter) WriteComplex64(data []complex64) error {
	if err := wtr.writeHeader("c8", len(data)); err != nil {
		return err
	}
	if err := binary.Write(wtr.w, wtr.Endian, data); err != nil {
		return err
	}
	return wtr.w.Close()
}
// WriteFloat64 writes a slice of float64 values in npy format ("f8")
// and closes the underlying writer.
// The Close error is now returned instead of being silently discarded,
// so callers see deferred flush/close failures.
func (wtr *NpyWriter) WriteFloat64(data []float64) error {
	if err := wtr.writeHeader("f8", len(data)); err != nil {
		return err
	}
	if err := binary.Write(wtr.w, wtr.Endian, data); err != nil {
		return err
	}
	return wtr.w.Close()
}
// WriteFloat32 writes a slice of float32 values in npy format ("f4")
// and closes the underlying writer.
// The Close error is now returned instead of being silently discarded,
// so callers see deferred flush/close failures.
func (wtr *NpyWriter) WriteFloat32(data []float32) error {
	if err := wtr.writeHeader("f4", len(data)); err != nil {
		return err
	}
	if err := binary.Write(wtr.w, wtr.Endian, data); err != nil {
		return err
	}
	return wtr.w.Close()
}
// WriteUint64 writes a slice of uint64 values in npy format ("u8")
// and closes the underlying writer.
// The Close error is now returned instead of being silently discarded,
// so callers see deferred flush/close failures.
func (wtr *NpyWriter) WriteUint64(data []uint64) error {
	if err := wtr.writeHeader("u8", len(data)); err != nil {
		return err
	}
	if err := binary.Write(wtr.w, wtr.Endian, data); err != nil {
		return err
	}
	return wtr.w.Close()
}
// WriteUint32 writes a slice of uint32 values in npy format ("u4")
// and closes the underlying writer.
// The Close error is now returned instead of being silently discarded,
// so callers see deferred flush/close failures.
func (wtr *NpyWriter) WriteUint32(data []uint32) error {
	if err := wtr.writeHeader("u4", len(data)); err != nil {
		return err
	}
	if err := binary.Write(wtr.w, wtr.Endian, data); err != nil {
		return err
	}
	return wtr.w.Close()
}
// WriteUint16 writes a slice of uint16 values in npy format ("u2")
// and closes the underlying writer.
// The Close error is now returned instead of being silently discarded,
// so callers see deferred flush/close failures.
func (wtr *NpyWriter) WriteUint16(data []uint16) error {
	if err := wtr.writeHeader("u2", len(data)); err != nil {
		return err
	}
	if err := binary.Write(wtr.w, wtr.Endian, data); err != nil {
		return err
	}
	return wtr.w.Close()
}
// WriteUint8 writes a slice of uint8 values in npy format ("u1")
// and closes the underlying writer.
// The Close error is now returned instead of being silently discarded,
// so callers see deferred flush/close failures.
func (wtr *NpyWriter) WriteUint8(data []uint8) error {
	if err := wtr.writeHeader("u1", len(data)); err != nil {
		return err
	}
	if err := binary.Write(wtr.w, wtr.Endian, data); err != nil {
		return err
	}
	return wtr.w.Close()
}
// WriteInt64 writes a slice of int64 values in npy format ("i8")
// and closes the underlying writer.
// The Close error is now returned instead of being silently discarded,
// so callers see deferred flush/close failures.
func (wtr *NpyWriter) WriteInt64(data []int64) error {
	if err := wtr.writeHeader("i8", len(data)); err != nil {
		return err
	}
	if err := binary.Write(wtr.w, wtr.Endian, data); err != nil {
		return err
	}
	return wtr.w.Close()
}
// WriteInt32 writes a slice of int32 values in npy format ("i4")
// and closes the underlying writer.
// The Close error is now returned instead of being silently discarded,
// so callers see deferred flush/close failures.
func (wtr *NpyWriter) WriteInt32(data []int32) error {
	if err := wtr.writeHeader("i4", len(data)); err != nil {
		return err
	}
	if err := binary.Write(wtr.w, wtr.Endian, data); err != nil {
		return err
	}
	return wtr.w.Close()
}
// WriteInt16 writes a slice of int16 values in npy format ("i2")
// and closes the underlying writer.
// The Close error is now returned instead of being silently discarded,
// so callers see deferred flush/close failures.
func (wtr *NpyWriter) WriteInt16(data []int16) error {
	if err := wtr.writeHeader("i2", len(data)); err != nil {
		return err
	}
	if err := binary.Write(wtr.w, wtr.Endian, data); err != nil {
		return err
	}
	return wtr.w.Close()
}
// WriteInt8 writes a slice of int8 values in npy format.
func (wtr *NpyWriter) WriteInt8(data []int8) error {
err := wtr.writeHeader("i1", len(data))
if err != nil {
return err
}
err = binary.Write(wtr.w, wtr.Endian, data)
if err != nil {
return err
}
wtr.w.Close()
return nil
} | defs_gen.go | 0.822688 | 0.429908 | defs_gen.go | starcoder |
package skills
import (
"github.com/wieku/danser-go/app/beatmap/difficulty"
"github.com/wieku/danser-go/app/oppai/preprocessing"
"math"
"sort"
)
// Skill is a strain-based difficulty skill. It accumulates strain over
// a stream of DifficultyObjects and records per-section strain peaks,
// from which DifficultyValue derives the final number.
type Skill struct {
	// Strain values are multiplied by this number for the given skill. Used to balance the value of different skills between each other.
	SkillMultiplier float64
	// Determines how quickly strain decays for the given skill.
	// For example a value of 0.15 indicates that strain decays to 15% of its original value in one second.
	StrainDecayBase float64
	// The weight by which each strain value decays.
	DecayWeight float64
	// The length of each strain section.
	SectionLength float64
	// How many DifficultyObjects should be kept
	HistoryLength int
	// Keeps track of previous DifficultyObjects for strain section calculations
	Previous []*preprocessing.DifficultyObject
	// The current strain level
	CurrentStrain float64
	// Delegate to calculate strain value of skill
	StrainValueOf func(skill *Skill, obj *preprocessing.DifficultyObject) float64
	// Peak strain of the section currently being filled.
	currentSectionPeak float64
	// End time of the section currently being filled.
	currentSectionEnd float64
	// Peaks of all completed sections, in order.
	strainPeaks []float64
	// Should fixed clock rate calculations be used, set to false to use current osu!stable calculations (2021.01)
	fixedCalculations bool
	diff *difficulty.Difficulty
}
// NewSkill creates a Skill with the default decay weight, section
// length and history length; d supplies the active mod/clock settings.
func NewSkill(useFixedCalculations bool, d *difficulty.Difficulty) *Skill {
	s := &Skill{}
	s.DecayWeight = 0.9
	s.SectionLength = 400
	s.HistoryLength = 1
	s.fixedCalculations = useFixedCalculations
	s.diff = d
	return s
}
// processInternal updates the running strain with the given object,
// first closing out any strain sections that ended before it starts.
func (skill *Skill) processInternal(current *preprocessing.DifficultyObject) {
	// Section timing uses either the precomputed StartTime (fixed
	// calculations) or the raw object start time (stable path).
	var startTime float64
	if skill.fixedCalculations {
		startTime = current.StartTime
	} else {
		startTime = current.BaseObject.GetStartTime()
	}
	// The first object anchors the end of the first section.
	if len(skill.Previous) == 0 {
		skill.currentSectionEnd = math.Ceil(startTime/skill.SectionLength) * skill.SectionLength
	}
	// Close every section that finished before this object, recording
	// each section's peak strain.
	for startTime > skill.currentSectionEnd {
		skill.saveCurrentPeak()
		skill.startNewSectionFrom(skill.currentSectionEnd)
		if skill.fixedCalculations {
			skill.currentSectionEnd += skill.SectionLength
		} else {
			// Stable path scales the section length by the clock rate.
			skill.currentSectionEnd += skill.SectionLength * skill.diff.Speed
		}
	}
	// Decay strain over the gap since the previous object, add this
	// object's strain contribution, and track the section peak.
	skill.CurrentStrain *= skill.strainDecay(current.DeltaTime)
	skill.CurrentStrain += skill.StrainValueOf(skill, current) * skill.SkillMultiplier
	skill.currentSectionPeak = math.Max(skill.CurrentStrain, skill.currentSectionPeak)
}
// Process feeds a single DifficultyObject into the skill, trimming the
// stored history down to HistoryLength beforehand.
func (skill *Skill) Process(current *preprocessing.DifficultyObject) {
	if excess := len(skill.Previous) - skill.HistoryLength; excess > 0 {
		skill.Previous = skill.Previous[excess:]
	}
	skill.processInternal(current)
	skill.Previous = append(skill.Previous, current)
}
// GetPrevious returns the most recently processed object, or nil when
// nothing has been processed yet.
func (skill *Skill) GetPrevious() *preprocessing.DifficultyObject {
	if n := len(skill.Previous); n > 0 {
		return skill.Previous[n-1]
	}
	return nil
}
// GetCurrentStrainPeaks returns the peaks of all completed sections
// plus the (still open) current section's peak, as a fresh slice.
func (skill *Skill) GetCurrentStrainPeaks() []float64 {
	peaks := make([]float64, 0, len(skill.strainPeaks)+1)
	peaks = append(peaks, skill.strainPeaks...)
	return append(peaks, skill.currentSectionPeak)
}
// DifficultyValue combines the strain peaks into one difficulty number:
// a weighted sum over the peaks in descending order, with geometrically
// decaying weights (DecayWeight).
func (skill *Skill) DifficultyValue() float64 {
	var diff float64
	weight := 1.0
	for _, strain := range reverseSortFloat64s(skill.GetCurrentStrainPeaks()) {
		diff += strain * weight
		weight *= skill.DecayWeight
	}
	return diff
}
// strainDecay returns the multiplicative decay factor for a gap of ms
// milliseconds, i.e. StrainDecayBase^(ms/1000).
func (skill *Skill) strainDecay(ms float64) float64 {
	return math.Pow(skill.StrainDecayBase, ms/1000)
}
// saveCurrentPeak appends the finished section's peak strain to the
// list of recorded peaks.
func (skill *Skill) saveCurrentPeak() {
	skill.strainPeaks = append(skill.strainPeaks, skill.currentSectionPeak)
}
// startNewSectionFrom seeds the next section's peak with the current
// strain decayed from the previous object's start time to the section
// boundary `end`. Assumes at least one object has been processed.
func (skill *Skill) startNewSectionFrom(end float64) {
	prev := skill.GetPrevious()
	startTime := prev.BaseObject.GetStartTime()
	if skill.fixedCalculations {
		startTime = prev.StartTime
	}
	skill.currentSectionPeak = skill.CurrentStrain * skill.strainDecay(end-startTime)
}
// reverseSortFloat64s returns a copy of arr sorted in descending order;
// arr itself is left unmodified. Uses sort.Reverse instead of the
// previous sort-ascending-then-swap loop.
func reverseSortFloat64s(arr []float64) []float64 {
	x := make([]float64, len(arr))
	copy(x, arr)
	sort.Sort(sort.Reverse(sort.Float64Slice(x)))
	return x
}
package encode
import (
"github.com/0987363/go-geobuf/pkg/geojson"
"github.com/0987363/go-geobuf/pkg/geometry"
"github.com/0987363/go-geobuf/pkg/math"
"github.com/0987363/go-geobuf/proto"
)
// GeoJSON geometry type names.
// NOTE(review): EncodeGeometry below switches on the geojson package's
// own typed constants rather than these strings — confirm callers still
// need them.
const (
	GeometryPoint = "Point"
	GeometryMultiPoint = "MultiPoint"
	GeometryLineString = "LineString"
	GeometryMultiLineString = "MultiLineString"
	GeometryPolygon = "Polygon"
	GeometryMultiPolygon = "MultiPolygon"
)
// EncodeGeometry converts a GeoJSON geometry into its protobuf
// representation, quantizing coordinates to integers per opt.Precision
// (and flattening by opt.Dimension where applicable). Geometry types
// not handled by the switch (e.g. a GeometryCollection) yield nil.
func EncodeGeometry(g *geojson.Geometry, opt *EncodingConfig) *proto.Data_Geometry {
	switch g.Type {
	case geojson.GeometryPointType:
		p := g.Coordinates.(geometry.Point)
		return &proto.Data_Geometry{
			Type: proto.Data_Geometry_POINT,
			Coords: translateCoords(opt.Precision, p[:]),
		}
	case geojson.GeometryMultiPointType:
		p := g.Coordinates.(geometry.MultiPoint)
		return &proto.Data_Geometry{
			Type: proto.Data_Geometry_MULTIPOINT,
			Coords: translateLine(opt.Precision, opt.Dimension, p, false),
		}
	case geojson.GeometryLineStringType:
		p := g.Coordinates.(geometry.LineString)
		return &proto.Data_Geometry{
			Type: proto.Data_Geometry_LINESTRING,
			Coords: translateLine(opt.Precision, opt.Dimension, p, false),
		}
	case geojson.GeometryMultiLineStringType:
		p := g.Coordinates.(geometry.MultiLineString)
		coords, lengths := translateMultiLine(opt.Precision, opt.Dimension, p)
		return &proto.Data_Geometry{
			Type: proto.Data_Geometry_MULTILINESTRING,
			Coords: coords,
			Lengths: lengths,
		}
	case geojson.GeometryPolygonType:
		p := []geometry.Ring(g.Coordinates.(geometry.Polygon))
		coords, lengths := translateMultiRing(opt.Precision, opt.Dimension, p)
		return &proto.Data_Geometry{
			Type: proto.Data_Geometry_POLYGON,
			Coords: coords,
			Lengths: lengths,
		}
	case geojson.GeometryMultiPolygonType:
		p := []geometry.Polygon(g.Coordinates.(geometry.MultiPolygon))
		coords, lengths := translateMultiPolygon(opt.Precision, opt.Dimension, p)
		return &proto.Data_Geometry{
			Type: proto.Data_Geometry_MULTIPOLYGON,
			Coords: coords,
			Lengths: lengths,
		}
	}
	return nil
}
// translateMultiLine flattens a set of line strings into one
// delta-encoded coordinate array plus the per-line point counts.
func translateMultiLine(e uint, dim uint, lines []geometry.LineString) ([]int64, []uint32) {
	coords := []int64{}
	lengths := make([]uint32, 0, len(lines))
	for _, line := range lines {
		lengths = append(lengths, uint32(len(line)))
		coords = append(coords, translateLine(e, dim, line, false)...)
	}
	return coords, lengths
}
// translateMultiPolygon flattens a multi-polygon into one delta-encoded
// coordinate array; lengths starts with the polygon count, followed by
// each polygon's ring count and its per-ring lengths.
func translateMultiPolygon(e uint, dim uint, polygons []geometry.Polygon) ([]int64, []uint32) {
	coords := []int64{}
	lengths := []uint32{uint32(len(polygons))}
	for _, rings := range polygons {
		lengths = append(lengths, uint32(len(rings)))
		ringCoords, ringLengths := translateMultiRing(e, dim, rings)
		lengths = append(lengths, ringLengths...)
		coords = append(coords, ringCoords...)
	}
	return coords, lengths
}
// translateMultiRing flattens polygon rings into one delta-encoded
// coordinate array. Each recorded length omits the closing point, which
// translateLine drops for closed rings and decoding restores.
func translateMultiRing(e uint, dim uint, lines []geometry.Ring) ([]int64, []uint32) {
	coords := []int64{}
	lengths := make([]uint32, 0, len(lines))
	for _, ring := range lines {
		lengths = append(lengths, uint32(len(ring)-1))
		coords = append(coords, translateLine(e, dim, ring, true)...)
	}
	return coords, lengths
}
/*
translateLine delta-encodes a line's coordinates as integers.

Because protobuf varints pack small numbers into fewer bytes (#1), each
coordinate is stored as the difference from the previous point's value
in the same dimension: (123.123, 234.234), (123.134, 234.236) becomes
(123123, 234234), (11, 2) — only the first point pays the full encoding
cost.

Closed rings (polygons) start and end at the same point, so the final
point is dropped here and restored on decode.

1. https://developers.google.com/protocol-buffers/docs/encoding#varints
*/
func translateLine(precision uint, dim uint, points []geometry.Point, isClosed bool) []int64 {
	prev := make([]int64, dim)
	ret := make([]int64, len(points)*int(dim))
	for i, point := range points {
		for j, p := range point {
			v := math.IntWithPrecision(p, precision)
			ret[int(dim)*i+j] = v - prev[j]
			prev[j] = v
		}
	}
	if isClosed {
		ret = ret[:len(ret)-int(dim)]
	}
	return ret
}
// Converts a floating point geojson point to int64 by multiplying it by a factor of 10,
// potentially truncating and rounding
func translateCoords(precision uint, point []float64) []int64 {
ret := make([]int64, len(point))
for i, p := range point {
ret[i] = math.IntWithPrecision(p, precision)
}
return ret
} | pkg/encode/geometry.go | 0.725843 | 0.508239 | geometry.go | starcoder |
package template
// COMMON_GRAPHQLS is the GraphQL SDL shared by every generated schema:
// Hasura-style comparison-expression input types for each scalar type,
// the OrderBy enum, and the custom scalar declarations.
var COMMON_GRAPHQLS = `
"""
expression to compare columns of type _jsonb. All fields are combined with logical 'AND'.
"""
input JsonbComparisonExp {
	_eq: Jsonb
	_gt: Jsonb
	_gte: Jsonb
	_in: [Jsonb!]
	_is_null: Boolean
	_lt: Jsonb
	_lte: Jsonb
	_neq: Jsonb
	_nin: [Jsonb!]
}
"""
expression to compare columns of type bigint. All fields are combined with logical 'AND'.
"""
input BigintComparisonExp {
	_eq: Bigint
	_gt: Bigint
	_gte: Bigint
	_in: [Bigint!]
	_is_null: Boolean
	_lt: Bigint
	_lte: Bigint
	_neq: Bigint
	_nin: [Bigint!]
}
"""
expression to compare columns of type Boolean. All fields are combined with logical 'AND'.
"""
input BooleanComparisonExp {
	_eq: Boolean
	_gt: Boolean
	_gte: Boolean
	_in: [Boolean!]
	_is_null: Boolean
	_lt: Boolean
	_lte: Boolean
	_neq: Boolean
	_nin: [Boolean!]
}
"""
expression to compare columns of type Int. All fields are combined with logical 'AND'.
"""
input IntComparisonExp {
	_eq: Int
	_gt: Int
	_gte: Int
	_in: [Int!]
	_is_null: Boolean
	_lt: Int
	_lte: Int
	_neq: Int
	_nin: [Int!]
}
"""
expression to compare columns of type Float. All fields are combined with logical 'AND'.
"""
input FloatComparisonExp{
	_eq: Float
	_gt: Float
	_gte: Float
	_in: [Float!]
	_is_null: Boolean
	_lt: Float
	_lte: Float
	_neq: Float
	_nin: [Float!]
}
"""
column ordering options
"""
enum OrderBy {
	"""
	in the ascending order, nulls last
	"""
	asc
	"""
	in the ascending order, nulls first
	"""
	asc_nulls_first
	"""
	in the ascending order, nulls last
	"""
	asc_nulls_last
	"""
	in the descending order, nulls first
	"""
	desc
	"""
	in the descending order, nulls first
	"""
	desc_nulls_first
	"""
	in the descending order, nulls last
	"""
	desc_nulls_last
}
"""
expression to compare columns of type String. All fields are combined with logical 'AND'.
"""
input StringComparisonExp {
	_eq: String
	_gt: String
	_gte: String
	_ilike: String
	_in: [String!]
	_is_null: Boolean
	_like: String
	_lt: String
	_lte: String
	_neq: String
	_nilike: String
	_nin: [String!]
	_nlike: String
	_nsimilar: String
	_similar: String
}
"""
expression to compare columns of type timestamptz. All fields are combined with logical 'AND'.
"""
input TimestamptzComparisonExp {
	_eq: Timestamptz
	_gt: Timestamptz
	_gte: Timestamptz
	_in: [Timestamptz!]
	_is_null: Boolean
	_lt: Timestamptz
	_lte: Timestamptz
	_neq: Timestamptz
	_nin: [Timestamptz!]
}
"""
expression to compare columns of type numeric. All fields are combined with logical 'AND'.
"""
input NumericComparisonExp {
	_eq: Numeric
	_gt: Numeric
	_gte: Numeric
	_in: [Numeric!]
	_is_null: Boolean
	_lt: Numeric
	_lte: Numeric
	_neq: Numeric
	_nin: [Numeric!]
}
"""
expression to compare columns of type point. All fields are combined with logical 'AND'.
"""
input PointComparisonExp {
	_eq: Point
	_gt: Point
	_gte: Point
	_in: [Point!]
	_is_null: Boolean
	_lt: Point
	_lte: Point
	_neq: Point
	_nin: [Point!]
}
scalar Jsonb
scalar Bigint
scalar Timestamptz
scalar Point
scalar Numeric
`
package onshape
import (
"encoding/json"
)
// BTPArgumentDeclaration232 struct for BTPArgumentDeclaration232.
// Generated API model; all optional fields are pointers where nil
// means "not set" (and is omitted from JSON via omitempty).
type BTPArgumentDeclaration232 struct {
	BTPNode7
	BtType *string `json:"btType,omitempty"`
	Name *BTPIdentifier8 `json:"name,omitempty"`
	StandardType *string `json:"standardType,omitempty"`
	Type *BTPTypeName290 `json:"type,omitempty"`
	TypeName *string `json:"typeName,omitempty"`
}
// NewBTPArgumentDeclaration232 instantiates a new BTPArgumentDeclaration232 object
// with every optional field unset.
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewBTPArgumentDeclaration232() *BTPArgumentDeclaration232 {
	return &BTPArgumentDeclaration232{}
}
// NewBTPArgumentDeclaration232WithDefaults instantiates a new BTPArgumentDeclaration232 object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewBTPArgumentDeclaration232WithDefaults() *BTPArgumentDeclaration232 {
	return &BTPArgumentDeclaration232{}
}
// GetBtType returns the BtType field value if set, zero value otherwise.
func (o *BTPArgumentDeclaration232) GetBtType() string {
	if o != nil && o.BtType != nil {
		return *o.BtType
	}
	var zero string
	return zero
}
// GetBtTypeOk returns a tuple with the BtType field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTPArgumentDeclaration232) GetBtTypeOk() (*string, bool) {
	if o != nil && o.BtType != nil {
		return o.BtType, true
	}
	return nil, false
}
// HasBtType returns a boolean if a field has been set.
func (o *BTPArgumentDeclaration232) HasBtType() bool {
	return o != nil && o.BtType != nil
}
// SetBtType gets a reference to the given string and assigns it to the BtType field.
// (The value is copied; the stored pointer refers to the copy.)
func (o *BTPArgumentDeclaration232) SetBtType(v string) {
	o.BtType = &v
}
// GetName returns the Name field value if set, zero value otherwise.
func (o *BTPArgumentDeclaration232) GetName() BTPIdentifier8 {
	if o != nil && o.Name != nil {
		return *o.Name
	}
	var zero BTPIdentifier8
	return zero
}
// GetNameOk returns a tuple with the Name field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTPArgumentDeclaration232) GetNameOk() (*BTPIdentifier8, bool) {
	if o != nil && o.Name != nil {
		return o.Name, true
	}
	return nil, false
}
// HasName returns a boolean if a field has been set.
func (o *BTPArgumentDeclaration232) HasName() bool {
	return o != nil && o.Name != nil
}
// SetName gets a reference to the given BTPIdentifier8 and assigns it to the Name field.
// (The value is copied; the stored pointer refers to the copy.)
func (o *BTPArgumentDeclaration232) SetName(v BTPIdentifier8) {
	o.Name = &v
}
// GetStandardType returns the StandardType field value if set, zero value otherwise.
func (o *BTPArgumentDeclaration232) GetStandardType() string {
	if o != nil && o.StandardType != nil {
		return *o.StandardType
	}
	var zero string
	return zero
}
// GetStandardTypeOk returns a tuple with the StandardType field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTPArgumentDeclaration232) GetStandardTypeOk() (*string, bool) {
	if o != nil && o.StandardType != nil {
		return o.StandardType, true
	}
	return nil, false
}
// HasStandardType returns a boolean if a field has been set.
func (o *BTPArgumentDeclaration232) HasStandardType() bool {
	return o != nil && o.StandardType != nil
}
// SetStandardType gets a reference to the given string and assigns it to the StandardType field.
// (The value is copied; the stored pointer refers to the copy.)
func (o *BTPArgumentDeclaration232) SetStandardType(v string) {
	o.StandardType = &v
}
// GetType returns the Type field value if set, zero value otherwise.
func (o *BTPArgumentDeclaration232) GetType() BTPTypeName290 {
	if o != nil && o.Type != nil {
		return *o.Type
	}
	var zero BTPTypeName290
	return zero
}
// GetTypeOk returns a tuple with the Type field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTPArgumentDeclaration232) GetTypeOk() (*BTPTypeName290, bool) {
	if o != nil && o.Type != nil {
		return o.Type, true
	}
	return nil, false
}
// HasType returns a boolean if a field has been set.
func (o *BTPArgumentDeclaration232) HasType() bool {
	return o != nil && o.Type != nil
}
// SetType gets a reference to the given BTPTypeName290 and assigns it to the Type field.
// (The value is copied; the stored pointer refers to the copy.)
func (o *BTPArgumentDeclaration232) SetType(v BTPTypeName290) {
	o.Type = &v
}
// GetTypeName returns the TypeName field value if set, zero value otherwise.
func (o *BTPArgumentDeclaration232) GetTypeName() string {
	if o != nil && o.TypeName != nil {
		return *o.TypeName
	}
	var zero string
	return zero
}
// GetTypeNameOk returns a tuple with the TypeName field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *BTPArgumentDeclaration232) GetTypeNameOk() (*string, bool) {
	if o != nil && o.TypeName != nil {
		return o.TypeName, true
	}
	return nil, false
}
// HasTypeName returns a boolean if a field has been set.
func (o *BTPArgumentDeclaration232) HasTypeName() bool {
	return o != nil && o.TypeName != nil
}
// SetTypeName gets a reference to the given string and assigns it to the TypeName field.
// (The value is copied; the stored pointer refers to the copy.)
func (o *BTPArgumentDeclaration232) SetTypeName(v string) {
	o.TypeName = &v
}
// MarshalJSON serializes the struct by first flattening the embedded
// BTPNode7 into a map (so its fields appear as top-level keys rather
// than a nested object), then overlaying this type's own set fields.
func (o BTPArgumentDeclaration232) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	// Round-trip the embedded base type through JSON to flatten it.
	serializedBTPNode7, errBTPNode7 := json.Marshal(o.BTPNode7)
	if errBTPNode7 != nil {
		return []byte{}, errBTPNode7
	}
	errBTPNode7 = json.Unmarshal([]byte(serializedBTPNode7), &toSerialize)
	if errBTPNode7 != nil {
		return []byte{}, errBTPNode7
	}
	// Only non-nil (set) fields are emitted.
	if o.BtType != nil {
		toSerialize["btType"] = o.BtType
	}
	if o.Name != nil {
		toSerialize["name"] = o.Name
	}
	if o.StandardType != nil {
		toSerialize["standardType"] = o.StandardType
	}
	if o.Type != nil {
		toSerialize["type"] = o.Type
	}
	if o.TypeName != nil {
		toSerialize["typeName"] = o.TypeName
	}
	return json.Marshal(toSerialize)
}
// NullableBTPArgumentDeclaration232 wraps a pointer value together with
// a "set" flag, distinguishing "unset" from "explicitly set to null".
type NullableBTPArgumentDeclaration232 struct {
	value *BTPArgumentDeclaration232
	isSet bool
}
// Get returns the wrapped value (may be nil).
func (v NullableBTPArgumentDeclaration232) Get() *BTPArgumentDeclaration232 {
	return v.value
}
// Set stores val and marks the value as set.
func (v *NullableBTPArgumentDeclaration232) Set(val *BTPArgumentDeclaration232) {
	v.value = val
	v.isSet = true
}
// IsSet reports whether Set (or UnmarshalJSON) has been called.
func (v NullableBTPArgumentDeclaration232) IsSet() bool {
	return v.isSet
}
// Unset clears the value and the set flag.
func (v *NullableBTPArgumentDeclaration232) Unset() {
	v.value = nil
	v.isSet = false
}
// NewNullableBTPArgumentDeclaration232 wraps val as an already-set nullable.
func NewNullableBTPArgumentDeclaration232(val *BTPArgumentDeclaration232) *NullableBTPArgumentDeclaration232 {
	return &NullableBTPArgumentDeclaration232{value: val, isSet: true}
}
// MarshalJSON encodes the wrapped value (JSON null when unset or nil).
func (v NullableBTPArgumentDeclaration232) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
// UnmarshalJSON decodes into the wrapped value and marks it set
// (a JSON null also counts as set, with a nil value).
func (v *NullableBTPArgumentDeclaration232) UnmarshalJSON(src []byte) error {
	v.isSet = true
	return json.Unmarshal(src, &v.value)
}
package main
// HealthECGView describes one preset of the ECG (heartbeat) display:
// the trace color, a gradient applied to older columns, and the
// waveform itself as 80 columns of {yOffset, lineHeight} pairs.
type HealthECGView struct {
	// Color of the current line
	Color [3]int
	// Lines to the left of the current line will have a darker color
	Gradient [3]int
	// The first value is the yOffset, the second value is for the line height
	// We need to add 1 to the line height so that if the height is 0, at least 1 pixel is rendered
	Lines [80][2]int
}
// NewHealthECGFine returns the green "Fine" ECG preset. Each row of
// lines is a {yOffset, lineHeight} column of the waveform.
func NewHealthECGFine() HealthECGView {
	lines := [80][2]int{
		{15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0},
		{15, 0}, {15, 0}, {15, 0}, {14, 0}, {13, 0}, {12, 0}, {12, 0}, {13, 2}, {15, 3}, {18, 2},
		{20, 0}, {16, 4}, {8, 8}, {5, 3}, {4, 0}, {5, 3}, {8, 7}, {15, 4}, {19, 5}, {24, 3},
		{27, 0}, {25, 2}, {21, 4}, {16, 5}, {14, 2}, {13, 0}, {14, 2}, {16, 3}, {19, 0}, {19, 0},
		{18, 0}, {16, 2}, {14, 2}, {13, 0}, {12, 0}, {13, 0}, {14, 1}, {15, 0}, {15, 0}, {15, 0},
		{15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0},
		{15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0},
		{15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0},
	}
	return HealthECGView{
		Color: [3]int{20, 255, 20}, // green
		Gradient: [3]int{1, 8, 1},
		Lines: lines,
	}
}
// NewHealthECGYellowCaution returns the yellow "Caution" ECG preset.
// Each row of lines is a {yOffset, lineHeight} column of the waveform.
func NewHealthECGYellowCaution() HealthECGView {
	lines := [80][2]int{
		{15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0},
		{15, 0}, {15, 0}, {14, 0}, {13, 0}, {12, 0}, {12, 0}, {14, 0}, {13, 2}, {15, 3}, {18, 2},
		{20, 0}, {16, 4}, {8, 8}, {6, 2}, {5, 0}, {6, 2}, {8, 5}, {13, 2}, {15, 0}, {16, 4},
		{20, 2}, {22, 0}, {21, 0}, {16, 5}, {15, 0}, {14, 0}, {14, 0}, {13, 0}, {13, 0}, {14, 0},
		{15, 0}, {15, 0}, {15, 0}, {14, 0}, {14, 0}, {14, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0},
		{15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0},
		{15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0},
		{15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0},
	}
	return HealthECGView{
		Color: [3]int{255, 255, 20}, // yellow
		Gradient: [3]int{8, 8, 1},
		Lines: lines,
	}
}
// NewHealthECGOrangeCaution returns the ECG view for the second caution
// level: an orange trace with a noticeably flattened pulse.
func NewHealthECGOrangeCaution() HealthECGView {
	// Each pair is {yOffset, lineHeight} for one of the 80 trace columns.
	lines := [80][2]int{
		{15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0},
		{15, 0}, {15, 0}, {16, 0}, {16, 0}, {17, 0}, {17, 0}, {17, 0}, {16, 0}, {15, 0}, {14, 0},
		{14, 0}, {14, 0}, {15, 0}, {15, 0}, {15, 0}, {14, 0}, {11, 3}, {10, 0}, {9, 0}, {10, 4},
		{13, 3}, {16, 3}, {19, 0}, {20, 0}, {19, 0}, {18, 0}, {16, 2}, {14, 2}, {13, 0}, {12, 0},
		{12, 0}, {12, 0}, {13, 0}, {14, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0},
		{15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0},
		{15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0},
		{15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0},
	}
	return HealthECGView{
		Color:    [3]int{255, 80, 20}, // orange
		Gradient: [3]int{8, 4, 1},
		Lines:    lines,
	}
}
// NewHealthECGDanger returns the ECG view for critically low health:
// a red trace with a weak, barely-there pulse.
func NewHealthECGDanger() HealthECGView {
	// Each pair is {yOffset, lineHeight} for one of the 80 trace columns.
	lines := [80][2]int{
		{15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0},
		{15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {14, 0},
		{14, 0}, {13, 0}, {13, 0}, {14, 0}, {15, 0}, {15, 0}, {15, 0}, {16, 0}, {17, 2}, {17, 0},
		{17, 0}, {14, 3}, {10, 4}, {9, 0}, {10, 2}, {12, 3}, {15, 0}, {16, 0}, {16, 0}, {16, 0},
		{16, 0}, {15, 0}, {14, 0}, {13, 0}, {13, 0}, {14, 0}, {14, 0}, {15, 0}, {15, 0}, {15, 0},
		{15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0},
		{15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0},
		{15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0},
	}
	return HealthECGView{
		Color:    [3]int{255, 20, 20}, // red
		Gradient: [3]int{8, 1, 1},
		Lines:    lines,
	}
}
// NewHealthECGPoison returns the ECG view used while poisoned: a purple
// (magenta) trace with an erratic, multi-spike waveform.
func NewHealthECGPoison() HealthECGView {
	// Each pair is {yOffset, lineHeight} for one of the 80 trace columns.
	lines := [80][2]int{
		{15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0},
		{15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {14, 1}, {13, 1}, {12, 1}, {12, 1},
		{13, 2}, {15, 3}, {18, 2}, {20, 1}, {16, 4}, {8, 8}, {5, 3}, {4, 1}, {5, 3}, {8, 7},
		{15, 4}, {19, 5}, {24, 2}, {26, 1}, {25, 1}, {15, 10}, {14, 1}, {15, 1}, {16, 2}, {18, 1},
		{17, 1}, {10, 7}, {9, 1}, {10, 2}, {12, 4}, {16, 1}, {17, 1}, {18, 1}, {18, 1}, {18, 1},
		{17, 1}, {16, 1}, {15, 1}, {15, 1}, {15, 1}, {15, 1}, {15, 1}, {12, 3}, {10, 2}, {9, 1},
		{10, 6}, {16, 3}, {19, 1}, {19, 1}, {17, 2}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0},
		{15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0}, {15, 0},
	}
	return HealthECGView{
		Color:    [3]int{255, 20, 255}, // purple
		Gradient: [3]int{8, 1, 8},
		Lines:    lines,
	}
}
package simulator
import (
"time"
)
const (
	// maxUsageRecorded caps how many peers a single UsageRecord tracks in
	// each direction; beyond that only a TooMany flag is kept.
	maxUsageRecorded = 50
)

// UsageRecord records which node was considered helpful to which node during pod rescheduling analysis.
type UsageRecord struct {
	usingTooMany  bool                 // set once the using set overflowed maxUsageRecorded
	using         map[string]time.Time // peer node -> last time this node used it
	usedByTooMany bool                 // set once the usedBy set overflowed maxUsageRecorded
	usedBy        map[string]time.Time // peer node -> last time it used this node
}
// UsageTracker track usage relationship between nodes in pod rescheduling calculations.
type UsageTracker struct {
	usage map[string]*UsageRecord // node name -> its usage record
}
// NewUsageTracker builds a new, empty usage tracker.
func NewUsageTracker() *UsageTracker {
	tracker := &UsageTracker{usage: map[string]*UsageRecord{}}
	return tracker
}
// Get gets the given node's UsageRecord, if present.
func (tracker *UsageTracker) Get(node string) (data *UsageRecord, found bool) {
	data, found = tracker.usage[node]
	return
}
// RegisterUsage registers that nodeA uses nodeB during usage calculations at
// time timestamp. Both directions are recorded: nodeB is added to nodeA's
// "using" set and nodeA to nodeB's "usedBy" set. Each set is capped at
// maxUsageRecorded entries; once full, the matching TooMany flag is set and
// the new entry is dropped (same behavior as before, with the duplicated
// record-creation logic factored into helpers).
func (tracker *UsageTracker) RegisterUsage(nodeA string, nodeB string, timestamp time.Time) {
	tracker.getOrCreateRecord(nodeA).addUsing(nodeB, timestamp)
	tracker.getOrCreateRecord(nodeB).addUsedBy(nodeA, timestamp)
}

// getOrCreateRecord returns the record for node, creating an empty one if absent.
func (tracker *UsageTracker) getOrCreateRecord(node string) *UsageRecord {
	record, found := tracker.usage[node]
	if !found {
		record = &UsageRecord{
			using:  make(map[string]time.Time),
			usedBy: make(map[string]time.Time),
		}
		tracker.usage[node] = record
	}
	return record
}

// addUsing notes that this record's owner uses node, respecting the size cap.
func (record *UsageRecord) addUsing(node string, timestamp time.Time) {
	if len(record.using) >= maxUsageRecorded {
		record.usingTooMany = true
		return
	}
	record.using[node] = timestamp
}

// addUsedBy notes that this record's owner is used by node, respecting the size cap.
func (record *UsageRecord) addUsedBy(node string, timestamp time.Time) {
	if len(record.usedBy) >= maxUsageRecorded {
		record.usedByTooMany = true
		return
	}
	record.usedBy[node] = timestamp
}
// Unregister removes the given node from all usage records.
func (tracker *UsageTracker) Unregister(node string) {
	record, found := tracker.usage[node]
	if !found {
		return
	}
	// Drop the back-references held by every peer of this node.
	for peer := range record.using {
		if peerRecord, ok := tracker.usage[peer]; ok {
			delete(peerRecord.usedBy, node)
		}
	}
	for peer := range record.usedBy {
		if peerRecord, ok := tracker.usage[peer]; ok {
			delete(peerRecord.using, node)
		}
	}
	delete(tracker.usage, node)
}
func filterOutOld(timestampMap map[string]time.Time, cutoff time.Time) {
toRemove := make([]string, 0)
for key, timestamp := range timestampMap {
if timestamp.Before(cutoff) {
toRemove = append(toRemove, key)
}
}
for _, key := range toRemove {
delete(timestampMap, key)
}
}
// CleanUp removes all relations updated before the cutoff time, and drops
// records that end up completely empty (and un-flagged).
func (tracker *UsageTracker) CleanUp(cutoff time.Time) {
	var emptyNodes []string
	for node, record := range tracker.usage {
		// Overflowed sets are incomplete, so their timestamps cannot be trusted
		// for pruning; leave them untouched.
		if !record.usingTooMany {
			filterOutOld(record.using, cutoff)
		}
		if !record.usedByTooMany {
			filterOutOld(record.usedBy, cutoff)
		}
		unflagged := !record.usingTooMany && !record.usedByTooMany
		if unflagged && len(record.using) == 0 && len(record.usedBy) == 0 {
			emptyNodes = append(emptyNodes, node)
		}
	}
	for _, node := range emptyNodes {
		delete(tracker.usage, node)
	}
}
// RemoveNodeFromTracker removes node from tracker and also cleans the passed
// utilization map: every node whose utilization data may have depended on the
// removed node is dropped. If any involved record overflowed, the dependency
// set is unknown, so the whole utilization map is cleared.
func RemoveNodeFromTracker(tracker *UsageTracker, node string, utilization map[string]time.Time) {
	var keysToRemove []string
	if mainRecord, found := tracker.Get(node); found {
		overflow := mainRecord.usingTooMany
		if !overflow {
			for usedNode := range mainRecord.using {
				usedNodeRecord, ok := tracker.Get(usedNode)
				if !ok {
					continue
				}
				if usedNodeRecord.usedByTooMany {
					overflow = true
					break
				}
				for anotherNode := range usedNodeRecord.usedBy {
					keysToRemove = append(keysToRemove, anotherNode)
				}
			}
		}
		if overflow {
			// Dependency information is incomplete; invalidate everything.
			keysToRemove = getAllKeys(utilization)
		}
	}
	tracker.Unregister(node)
	delete(utilization, node)
	for _, key := range keysToRemove {
		delete(utilization, key)
	}
}
func getAllKeys(m map[string]time.Time) []string {
result := make([]string, 0, len(m))
for key := range m {
result = append(result, key)
}
return result
} | simulator/tracker.go | 0.613352 | 0.457924 | tracker.go | starcoder |
package sudogo
import (
"math"
)
// Predefined puzzle kinds. The side length of a puzzle is always
// boxWidth*boxHeight, which is also the number of distinct digits.

// The classic 9x9 puzzle with 9 boxes of 3x3 and digits 1-9.
var Classic = &Kind{
	BoxSize: Size{3, 3},
}

// A 4x4 puzzle with 4 boxes of 2x2 and digits 1-4.
var Kind2x2 = &Kind{
	BoxSize: Size{2, 2},
}

// A 6x6 puzzle with 6 boxes of 3x2 and digits 1-6.
var Kind3x2 = &Kind{
	BoxSize: Size{3, 2},
}

// A 12x12 puzzle with 12 boxes of 4x3 and digits 1-12.
var Kind4x3 = &Kind{
	BoxSize: Size{4, 3},
}

// A 16x16 puzzle with 16 boxes of 4x4 and digits 1-16.
var Kind4x4 = &Kind{
	BoxSize: Size{4, 4},
}
// Kind describes a sudoku variant: the dimensions of its boxes plus any
// additional constraints that apply to puzzles of this kind.
type Kind struct {
	BoxSize     Size         // width and height, in cells, of a single box
	Constraints []Constraint // extra rules beyond the standard row/column/box ones
}
// NewKind builds a Kind for puzzles whose boxes measure boxWidth by boxHeight cells.
func NewKind(boxWidth int, boxHeight int) *Kind {
	box := Size{Width: boxWidth, Height: boxHeight}
	return &Kind{BoxSize: box}
}
// Clone returns a copy of the kind. BoxSize is copied by value; the
// Constraints slice is duplicated via sliceClone (depth of that copy is
// whatever sliceClone provides — presumably a shallow slice copy; confirm
// if Constraint implementations ever become mutable).
func (kind *Kind) Clone() *Kind {
	return &Kind{
		BoxSize:     kind.BoxSize,
		Constraints: sliceClone(kind.Constraints),
	}
}
// The width, height, and number of digits in this puzzle kind.
func (kind *Kind) Size() int {
	return kind.BoxSize.Width * kind.BoxSize.Height
}

// The number of unique digits in this puzzle kind.
func (kind *Kind) Digits() int {
	return kind.Size()
}

// How many boxes wide the puzzle would be.
// Note this is deliberately BoxSize.Height, not Width: the side length is
// W*H cells, so (W*H)/boxWidth = H boxes fit across.
func (kind *Kind) BoxesWide() int {
	return kind.BoxSize.Height
}

// How many boxes high the puzzle would be.
// Symmetric to BoxesWide: (W*H)/boxHeight = W boxes fit vertically.
func (kind *Kind) BoxesHigh() int {
	return kind.BoxSize.Width
}

// How many cells would be in the puzzle.
func (kind *Kind) Area() int {
	return kind.Size() * kind.Size()
}

// How many characters it could take to print out the largest digit of a value
// in this puzzle kind (the decimal digit count of Digits(), e.g. 16 -> 2).
func (kind *Kind) DigitsSize() int {
	return int(math.Floor(math.Log10(float64(kind.Digits())))) + 1
}
// Returns the dimensions of a puzzle of this kind, in order: boxes across,
// boxes down, box width, box height, and side length. As with
// BoxesWide/BoxesHigh, boxes-across equals the box height and vice versa.
func (kind *Kind) GetDimensions() (boxsWide int, boxsHigh int, boxWidth int, boxHeight int, size int) {
	w := kind.BoxSize.Width
	h := kind.BoxSize.Height
	return h, w, w, h, w * h
}
// Creates an empty puzzle of this kind.
func (kind *Kind) Empty() Puzzle {
	return New(kind)
}

// Creates a puzzle with an initial set of values of this kind.
func (kind *Kind) Create(values [][]int) Puzzle {
	instance := New(kind)
	instance.SetAll(values)
	return instance
}

// Creates a generator for puzzles of this kind.
func (kind *Kind) Generator() Generator {
	return NewGenerator(kind)
}
func (kind *Kind) ConstraintsFor(cell *Cell) []Constraint {
constraints := make([]Constraint, 0, len(kind.Constraints))
for _, c := range kind.Constraints {
if c.Affects(cell) {
constraints = append(constraints, c)
}
}
return constraints
} | pkg/kind.go | 0.728555 | 0.514705 | kind.go | starcoder |
package basic
import (
"math"
"github.com/starainrt/astro/planet"
. "github.com/starainrt/astro/tools"
)
// JupiterL returns component 0 of planet index 4 (Jupiter) from
// planet.WherePlanet for Julian day JD — presumably the heliocentric
// ecliptic longitude; confirm against the planet package.
func JupiterL(JD float64) float64 {
	return planet.WherePlanet(4, 0, JD)
}

// JupiterB returns component 1 — presumably the heliocentric ecliptic latitude.
func JupiterB(JD float64) float64 {
	return planet.WherePlanet(4, 1, JD)
}

// JupiterR returns component 2 — presumably the heliocentric radius vector (AU).
func JupiterR(JD float64) float64 {
	return planet.WherePlanet(4, 2, JD)
}
func AJupiterX(JD float64) float64 {
l := JupiterL(JD)
b := JupiterB(JD)
r := JupiterR(JD)
el := planet.WherePlanet(-1, 0, JD)
eb := planet.WherePlanet(-1, 1, JD)
er := planet.WherePlanet(-1, 2, JD)
x := r*Cos(b)*Cos(l) - er*Cos(eb)*Cos(el)
return x
}
func AJupiterY(JD float64) float64 {
l := JupiterL(JD)
b := JupiterB(JD)
r := JupiterR(JD)
el := planet.WherePlanet(-1, 0, JD)
eb := planet.WherePlanet(-1, 1, JD)
er := planet.WherePlanet(-1, 2, JD)
y := r*Cos(b)*Sin(l) - er*Cos(eb)*Sin(el)
return y
}
func AJupiterZ(JD float64) float64 {
//l := JupiterL(JD)
b := JupiterB(JD)
r := JupiterR(JD)
// el := planet.WherePlanet(-1, 0, JD)
eb := planet.WherePlanet(-1, 1, JD)
er := planet.WherePlanet(-1, 2, JD)
z := r*Sin(b) - er*Sin(eb)
return z
}
// AJupiterXYZ returns Jupiter's rectangular coordinates relative to Earth:
// Jupiter's heliocentric position minus Earth's (planet index -1), each
// converted from spherical (l, b, r) form.
func AJupiterXYZ(JD float64) (float64, float64, float64) {
	l := JupiterL(JD)
	b := JupiterB(JD)
	r := JupiterR(JD)
	el := planet.WherePlanet(-1, 0, JD)
	eb := planet.WherePlanet(-1, 1, JD)
	er := planet.WherePlanet(-1, 2, JD)
	x := r*Cos(b)*Cos(l) - er*Cos(eb)*Cos(el)
	y := r*Cos(b)*Sin(l) - er*Cos(eb)*Sin(el)
	z := r*Sin(b) - er*Sin(eb)
	return x, y, z
}
func JupiterSeeRa(JD float64) float64 {
lo, bo := JupiterSeeLoBo(JD)
sita := Sita(JD)
ra := math.Atan2((Sin(lo)*Cos(sita) - Tan(bo)*Sin(sita)), Cos(lo))
ra = ra * 180 / math.Pi
return Limit360(ra)
}
func JupiterSeeDec(JD float64) float64 {
lo, bo := JupiterSeeLoBo(JD)
sita := Sita(JD)
dec := ArcSin(Sin(bo)*Cos(sita) + Cos(bo)*Sin(sita)*Sin(lo))
return dec
}
// JupiterSeeRaDec converts Jupiter's apparent ecliptic coordinates
// (JupiterSeeLoBo) to equatorial ones using the obliquity Sita(JD), returning
// right ascension (degrees, normalized to [0, 360)) and declination (degrees).
func JupiterSeeRaDec(JD float64) (float64, float64) {
	lo, bo := JupiterSeeLoBo(JD)
	sita := Sita(JD)
	ra := math.Atan2((Sin(lo)*Cos(sita) - Tan(bo)*Sin(sita)), Cos(lo))
	ra = ra * 180 / math.Pi
	dec := ArcSin(Sin(bo)*Cos(sita) + Cos(bo)*Sin(sita)*Sin(lo))
	return Limit360(ra), dec
}
// EarthJupiterAway returns the Earth-Jupiter distance (AU) as the Euclidean
// norm of the relative rectangular position.
func EarthJupiterAway(JD float64) float64 {
	x, y, z := AJupiterXYZ(JD)
	to := math.Sqrt(x*x + y*y + z*z)
	return to
}
func JupiterSeeLo(JD float64) float64 {
x, y, z := AJupiterXYZ(JD)
to := 0.0057755183 * math.Sqrt(x*x+y*y+z*z)
x, y, z = AJupiterXYZ(JD - to)
lo := math.Atan2(y, x)
bo := math.Atan2(z, math.Sqrt(x*x+y*y))
lo = lo * 180 / math.Pi
bo = bo * 180 / math.Pi
lo = Limit360(lo)
//lo-=GXCLo(lo,bo,JD)/3600;
//bo+=GXCBo(lo,bo,JD);
lo += HJZD(JD)
return lo
}
func JupiterSeeBo(JD float64) float64 {
x, y, z := AJupiterXYZ(JD)
to := 0.0057755183 * math.Sqrt(x*x+y*y+z*z)
x, y, z = AJupiterXYZ(JD - to)
//lo := math.Atan2(y, x)
bo := math.Atan2(z, math.Sqrt(x*x+y*y))
//lo = lo * 180 / math.Pi
bo = bo * 180 / math.Pi
//lo+=GXCLo(lo,bo,JD);
//bo+=GXCBo(lo,bo,JD)/3600;
//lo+=HJZD(JD);
return bo
}
// JupiterSeeLoBo returns Jupiter's apparent ecliptic longitude and latitude
// (degrees). A single light-time iteration is applied: the geometric position
// gives the distance, 0.0057755183 days/AU converts it to a light-travel
// delay, and the position is re-evaluated at JD minus that delay. Longitude
// is normalized to [0, 360) and offset by HJZD; the commented-out GXCLo/GXCBo
// corrections (aberration, presumably) are intentionally disabled upstream.
func JupiterSeeLoBo(JD float64) (float64, float64) {
	x, y, z := AJupiterXYZ(JD)
	to := 0.0057755183 * math.Sqrt(x*x+y*y+z*z)
	x, y, z = AJupiterXYZ(JD - to)
	lo := math.Atan2(y, x)
	bo := math.Atan2(z, math.Sqrt(x*x+y*y))
	lo = lo * 180 / math.Pi
	bo = bo * 180 / math.Pi
	lo = Limit360(lo)
	//lo-=GXCLo(lo,bo,JD)/3600;
	//bo+=GXCBo(lo,bo,JD);
	lo += HJZD(JD)
	return lo, bo
}
// JupiterMag estimates Jupiter's apparent magnitude, rounded to two decimals.
// The phase angle i is obtained from the Sun-Jupiter, Earth-Jupiter and
// Sun-Earth distances via the law of cosines; the -9.40 base and 0.0005*i
// phase term follow a standard planetary magnitude formula.
func JupiterMag(JD float64) float64 {
	AwaySun := JupiterR(JD)
	AwayEarth := EarthJupiterAway(JD)
	Away := planet.WherePlanet(-1, 2, JD)
	i := (AwaySun*AwaySun + AwayEarth*AwayEarth - Away*Away) / (2 * AwaySun * AwayEarth)
	i = ArcCos(i)
	Mag := -9.40 + 5*math.Log10(AwaySun*AwayEarth) + 0.0005*i
	return FloatRound(Mag, 2)
}
package Euler2D
import (
"math"
"sort"
"github.com/notargets/gocfd/DG2D"
"github.com/notargets/gocfd/types"
"github.com/notargets/gocfd/utils"
)
type EdgeKeySlice []types.EdgeKey
func (p EdgeKeySlice) Len() int { return len(p) }
func (p EdgeKeySlice) Less(i, j int) bool {
// Sorted to produce groups of vertex centered edges on the right edge vertex
vnode1, vnode2 := int(p[i]>>32), int(p[j]>>32)
return vnode1 < vnode2
}
func (p EdgeKeySlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
// Sort is a convenience method.
func (p EdgeKeySlice) Sort() { sort.Sort(p) }
type EdgeKeySliceSortLeft EdgeKeySlice
func (p EdgeKeySliceSortLeft) Sort() { sort.Sort(p) }
func (p EdgeKeySliceSortLeft) Len() int { return len(p) }
func (p EdgeKeySliceSortLeft) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p EdgeKeySliceSortLeft) Less(i, j int) bool {
getLeftVert := func(ek types.EdgeKey) (left int) {
enTmp := ek >> 32
left = int(ek - enTmp*(1<<32))
return
}
// Sorted to produce groups of vertex centered edges on the left edge vertex
vnode1, vnode2 := getLeftVert(p[i]), getLeftVert(p[j])
return vnode1 < vnode2
}
type EdgeValueStorage struct {
Fluxes [][4]utils.Matrix
StorageIndex map[types.EdgeKey]int // Index into normal flux storage using edge key
PMap *PartitionMap
Nedge int
}
type ValueType uint8
const (
NumericalFluxForEuler ValueType = iota
QFluxForGradient
GradientFluxForLaplacian
)
var (
FluxIndex = map[ValueType]int{NumericalFluxForEuler: 0, QFluxForGradient: 1, GradientFluxForLaplacian: 2}
)
func (c *Euler) NewEdgeStorage() (nf *EdgeValueStorage) {
var (
NumEdges = len(c.dfr.Tris.Edges)
)
nf = &EdgeValueStorage{
StorageIndex: make(map[types.EdgeKey]int),
PMap: c.Partitions,
Nedge: c.dfr.FluxElement.Nedge,
Fluxes: make([][4]utils.Matrix, len(FluxIndex)),
}
// Allocate memory for fluxes
for i := range nf.Fluxes {
for n := 0; n < 4; n++ {
nf.Fluxes[i][n] = utils.NewMatrix(NumEdges, nf.Nedge)
}
}
var index int
for np := 0; np < c.Partitions.ParallelDegree; np++ {
for _, en := range c.SortedEdgeKeys[np] {
nf.StorageIndex[en] = index
index++
}
}
return
}
func (nf *EdgeValueStorage) GetEdgeValues(valType ValueType, myThread, kLocal, varNum, localEdgeNumber int, dfr *DG2D.DFR2D) (EdgeValues []float64, sign int) {
var (
kGlobal = nf.PMap.GetGlobalK(kLocal, myThread)
Kmax = dfr.K
edgeNum = localEdgeNumber
Nedge = nf.Nedge
target = nf.Fluxes[FluxIndex[valType]][varNum]
)
en := dfr.EdgeNumber[kGlobal+Kmax*edgeNum]
edgeIndex := nf.StorageIndex[en]
ind := edgeIndex * Nedge
EdgeValues = target.DataP[ind : ind+Nedge]
if int(dfr.Tris.Edges[en].ConnectedTris[0]) == kGlobal {
// These values were stored in this element's order
sign = 1
} else {
// These values should be reversed in direction (and if normals, sign as well)
sign = -1
}
return
}
func (nf *EdgeValueStorage) PutEdgeValues(en types.EdgeKey, valType ValueType, EdgeValues [][4]float64) {
var (
target = nf.Fluxes[FluxIndex[valType]]
)
// Load the normal flux into the global normal flux storage
edgeIndex := nf.StorageIndex[en]
for i := 0; i < nf.Nedge; i++ {
ind := i + edgeIndex*nf.Nedge
for n := 0; n < 4; n++ {
target[n].DataP[ind] = EdgeValues[i][n]
}
}
}
func (c *Euler) GetFaceNormal(kGlobal, edgeNumber int) (normal [2]float64) {
var (
KmaxGlobal = c.dfr.K
faceInd = kGlobal + KmaxGlobal*edgeNumber
)
normal = [2]float64{c.dfr.FaceNorm[0].DataP[faceInd], c.dfr.FaceNorm[1].DataP[faceInd]}
return
}
func (c *Euler) CalculateEdgeFlux(Time float64, CalculateDT bool, Jdet, DT []utils.Matrix, Q_Face [][4]utils.Matrix,
edgeKeys EdgeKeySlice, EdgeQ1, EdgeQ2 [][4]float64) (waveSpeedMax float64) {
var (
Nedge = c.dfr.FluxElement.Nedge
numericalFluxForEuler = EdgeQ1
qFluxForGradient = EdgeQ2
pm = c.Partitions
)
for _, en := range edgeKeys {
e := c.dfr.Tris.Edges[en]
var (
k0Global = int(e.ConnectedTris[0])
k0, Kmax0, myThread0 = pm.GetLocalK(int(e.ConnectedTris[0]))
edgeNumber0 = int(e.ConnectedTriEdgeNumber[0])
normal0 = c.GetFaceNormal(k0Global, edgeNumber0)
)
switch e.NumConnectedTris {
case 0:
panic("unable to handle unconnected edges")
case 1: // Handle edges with only one triangle - default is edge flux, which will be replaced by a BC flux
c.calculateNonSharedEdgeFlux(e, Nedge, Time,
k0, Kmax0, edgeNumber0, myThread0,
normal0, numericalFluxForEuler, qFluxForGradient, Q_Face)
case 2: // Handle edges with two connected tris - shared faces
var (
kR, KmaxR, myThreadR = pm.GetLocalK(int(e.ConnectedTris[1]))
edgeNumberR = int(e.ConnectedTriEdgeNumber[1])
)
c.calculateSharedEdgeFlux(Nedge,
k0, Kmax0, edgeNumber0, myThread0,
kR, KmaxR, edgeNumberR, myThreadR,
normal0, numericalFluxForEuler, qFluxForGradient, Q_Face)
}
if CalculateDT {
waveSpeedMax = c.calculateLocalDT(e, Nedge, Q_Face, Jdet, DT)
}
// Load the normal flux into the global normal flux storage
c.EdgeStore.PutEdgeValues(en, NumericalFluxForEuler, numericalFluxForEuler)
c.EdgeStore.PutEdgeValues(en, QFluxForGradient, qFluxForGradient)
}
return
}
func (c *Euler) calculateLocalDT(e *DG2D.Edge, Nedge int,
Q_Face [][4]utils.Matrix, Jdet, DT []utils.Matrix) (waveSpeedMax float64) {
var (
pm = c.Partitions
edgeNum = int(e.ConnectedTriEdgeNumber[0])
k, Kmax, myThread = pm.GetLocalK(int(e.ConnectedTris[0]))
Np1 = c.dfr.N + 1
Np12 = float64(Np1 * Np1)
shift = edgeNum * Nedge
edgeLen = e.GetEdgeLength()
)
fs := 0.5 * Np12 * edgeLen / Jdet[myThread].DataP[k]
edgeMax := -100.
for i := shift; i < shift+Nedge; i++ {
ind := k + i*Kmax
C := c.FS.GetFlowFunction(Q_Face[myThread], ind, SoundSpeed)
U := c.FS.GetFlowFunction(Q_Face[myThread], ind, Velocity)
waveSpeed := fs * (U + C)
waveSpeedMax = math.Max(waveSpeed, waveSpeedMax)
if waveSpeed > edgeMax {
edgeMax = waveSpeed
}
}
if edgeMax > DT[myThread].DataP[k] {
DT[myThread].DataP[k] = edgeMax
}
if e.NumConnectedTris == 2 { // Add the wavespeed to the other tri connected to this edge if needed
k, Kmax, myThread = pm.GetLocalK(int(e.ConnectedTris[1]))
if edgeMax > DT[myThread].DataP[k] {
DT[myThread].DataP[k] = edgeMax
}
}
return
}
func (c *Euler) calculateSharedEdgeFlux(Nedge, kL, KmaxL, edgeNumberL, myThreadL, kR, KmaxR, edgeNumberR, myThreadR int,
normal0 [2]float64, numericalFluxForEuler, qFluxForGradient [][4]float64,
Q_Face [][4]utils.Matrix) {
var (
shiftL, shiftR = edgeNumberL * Nedge, edgeNumberR * Nedge
)
switch c.FluxCalcAlgo {
case FLUX_Average:
c.AvgFlux(kL, kR, KmaxL, KmaxR, shiftL, shiftR, Q_Face[myThreadL], Q_Face[myThreadR], normal0, numericalFluxForEuler)
case FLUX_LaxFriedrichs:
c.LaxFlux(kL, kR, KmaxL, KmaxR, shiftL, shiftR, Q_Face[myThreadL], Q_Face[myThreadR], normal0, numericalFluxForEuler)
case FLUX_Roe:
c.RoeFlux(kL, kR, KmaxL, KmaxR, shiftL, shiftR, Q_Face[myThreadL], Q_Face[myThreadR], normal0, numericalFluxForEuler)
case FLUX_RoeER:
c.RoeERFlux(kL, kR, KmaxL, KmaxR, shiftL, shiftR, Q_Face[myThreadL], Q_Face[myThreadR], normal0, numericalFluxForEuler)
}
// Store the average solution for this edge - average of the two connected tri edges
for i := 0; i < Nedge; i++ {
indL := kL + (i+shiftL)*KmaxL
indR := kR + (Nedge-1-i+shiftR)*KmaxR
for n := 0; n < 4; n++ {
qFluxForGradient[i][n] = 0.5 * (Q_Face[myThreadL][n].DataP[indL] + Q_Face[myThreadR][n].DataP[indR])
}
}
}
func (c *Euler) calculateNonSharedEdgeFlux(e *DG2D.Edge, Nedge int, Time float64,
k, Kmax, edgeNumber, myThread int,
normal0 [2]float64, numericalFluxForEuler, qFluxForGradient [][4]float64,
Q_Face [][4]utils.Matrix) {
var (
calculateNormalFlux bool
shift = edgeNumber * Nedge
)
calculateNormalFlux = true
switch e.BCType {
case types.BC_Far:
c.FarBC(k, Kmax, shift, Q_Face[myThread], normal0)
case types.BC_IVortex:
c.IVortexBC(Time, k, Kmax, shift, Q_Face[myThread], normal0)
case types.BC_Wall, types.BC_Cyl:
calculateNormalFlux = false
c.WallBC(k, Kmax, Q_Face[myThread], shift, normal0, numericalFluxForEuler) // Calculates normal flux directly
case types.BC_PeriodicReversed, types.BC_Periodic:
// One edge of the Periodic BC leads to calculation of both sides within the connected tris section, so noop here
return
}
if calculateNormalFlux {
var Fx, Fy [4]float64
for i := 0; i < Nedge; i++ {
ie := i + shift
ind := k + ie*Kmax
Fx, Fy = c.CalculateFlux(Q_Face[myThread], ind)
for n := 0; n < 4; n++ {
numericalFluxForEuler[i][n] = normal0[0]*Fx[n] + normal0[1]*Fy[n]
}
}
}
// Store the average solution for this edge - there's only one tri, so it's just a copy of this edge
for i := 0; i < Nedge; i++ {
ie := i + shift
ind := k + ie*Kmax
for n := 0; n < 4; n++ {
qFluxForGradient[i][n] = Q_Face[myThread][n].DataP[ind]
}
}
return
}
func (c *Euler) SetRTFluxOnEdges(myThread, Kmax int, F_RT_DOF [4]utils.Matrix) {
var (
dfr = c.dfr
Nedge = dfr.FluxElement.Nedge
Nint = dfr.FluxElement.Nint
KmaxGlobal = c.dfr.K
)
for k := 0; k < Kmax; k++ {
kGlobal := c.Partitions.GetGlobalK(k, myThread)
for edgeNum := 0; edgeNum < 3; edgeNum++ {
shift := edgeNum * Nedge
//nFlux, sign := c.EdgeStore.GetEdgeNormalFlux(kGlobal, edgeNum, dfr)
ind2 := kGlobal + KmaxGlobal*edgeNum
IInII := dfr.IInII.DataP[ind2]
for n := 0; n < 4; n++ {
nFlux, sign := c.EdgeStore.GetEdgeValues(NumericalFluxForEuler, myThread, k, n, edgeNum, dfr)
rtD := F_RT_DOF[n].DataP
for i := 0; i < Nedge; i++ {
// Place normed/scaled flux into the RT element space
ind := k + (2*Nint+i+shift)*Kmax
if sign > 0 {
rtD[ind] = nFlux[i] * IInII
} else {
rtD[ind] = -nFlux[Nedge-i-1] * IInII
}
}
}
}
}
}
func (c *Euler) InterpolateSolutionToEdges(Q, Q_Face [4]utils.Matrix) {
// Interpolate from solution points to edges using precomputed interpolation matrix
for n := 0; n < 4; n++ {
c.dfr.FluxEdgeInterp.Mul(Q[n], Q_Face[n])
}
return
} | model_problems/Euler2D/edges.go | 0.699768 | 0.603523 | edges.go | starcoder |
package main
import (
"bufio"
"flag"
"fmt"
"os"
"strconv"
"strings"
)
// main dispatches to the requested puzzle part based on the -method flag.
func main() {
	// Use Flags to run a part
	methodP := flag.String("method", "p2", "The method/part that should be run, valid are p1,p2 and test")
	flag.Parse()
	// Go switch cases do not fall through, so the explicit break statements
	// the old version carried were redundant and have been removed.
	switch *methodP {
	case "p1":
		partOne()
	case "p2":
		partTwo()
	case "test":
		// Intentionally a no-op.
	}
}
// partOne solves part 1: find the layer with the fewest 0 digits, then report
// the count of 1s multiplied by the count of 2s on that layer.
// Changes from the original: the stray debug print of the chosen layer's size
// has been removed, and the "multiped" typo in the output message is fixed.
func partOne() {
	input := stringInputToInt(readInput())
	width := 25
	height := 6
	pixels, maxLayers := buildPixelMap(input, height, width)
	// Find the layer with the lowest amount of zeros.
	lowestLayer := findLayerWithLowestNumber(pixels, 0, maxLayers)
	num1 := findNumberCountInLayer(pixels, 1, lowestLayer)
	num2 := findNumberCountInLayer(pixels, 2, lowestLayer)
	fmt.Println("The number of 1s multiplied by 2s is", num1*num2)
}
// buildPixelMap turns the flat digit stream into a map keyed by
// (x, y, layer) pixel coordinates, and returns the map along with the number
// of complete layers. The manual width/height "carry" bookkeeping of the old
// version is replaced by modular arithmetic on the flat index, which is
// equivalent and much harder to get wrong.
func buildPixelMap(input []int, height int, width int) (map[pixel]int, int) {
	pixels := make(map[pixel]int, len(input))
	layerSize := width * height
	for i, value := range input {
		offset := i % layerSize
		pixels[pixel{
			x:     offset % width,
			y:     offset / width,
			layer: i / layerSize,
		}] = value
	}
	// Integer division counts only complete layers, matching the old counter.
	return pixels, len(input) / layerSize
}
// findLayerWithLowestNumber returns the index of the layer containing the
// fewest pixels equal to number. Ties go to the lower layer index, as before.
// Fixes: the old version initialized its running minimum to the magic value
// 10000, silently breaking for layers with more than 10000 matches; it also
// rescanned the whole map once per layer (O(layers*pixels)). This version
// counts in a single pass with no arbitrary limit.
func findLayerWithLowestNumber(pixels map[pixel]int, number int, maxLayers int) int {
	counts := make([]int, maxLayers)
	for p, value := range pixels {
		// Ignore pixels beyond maxLayers (e.g. from a trailing partial layer),
		// matching the old per-layer scan bounds.
		if p.layer < maxLayers && value == number {
			counts[p.layer]++
		}
	}
	lowestLayer := 0
	for layer, total := range counts {
		if total < counts[lowestLayer] {
			lowestLayer = layer
		}
	}
	return lowestLayer
}
// findNumberCountInLayer counts how many pixels on the given layer hold the
// given value.
func findNumberCountInLayer(pixels map[pixel]int, number int, layer int) int {
	count := 0
	for p, colour := range pixels {
		if p.layer != layer {
			continue
		}
		if colour == number {
			count++
		}
	}
	return count
}
// getLayer extracts one layer's pixels into a fresh map.
func getLayer(pixels map[pixel]int, layer int) map[pixel]int {
	result := make(map[pixel]int)
	for p, colour := range pixels {
		if p.layer == layer {
			result[p] = colour
		}
	}
	return result
}
// partTwo solves part 2: stack all layers so the topmost non-transparent
// pixel wins, then render the resulting 25x6 image to stdout.
func partTwo() {
	input := stringInputToInt(readInput())
	width := 25
	height := 6
	pixels, maxLayers := buildPixelMap(input, height, width)
	// Layer all pixels over each other to find the "image layer".
	imageLayer := buildImageLayer(pixels, width, height, maxLayers)
	fmt.Print(printLayer(imageLayer, width, height))
}
// buildImageLayer flattens the layers into a single layer-0 map: for each
// (x, y), the first (topmost) layer whose pixel is opaque (0 or 1) supplies
// the value; transparent pixels (2) defer to deeper layers.
// Fix: the old version printed the merged map's size — leftover debug output —
// which has been removed.
func buildImageLayer(pixels map[pixel]int, width int, height int, maxLayers int) map[pixel]int {
	mergedLayer := make(map[pixel]int)
	for y := 0; y < height; y++ {
		for x := 0; x < width; x++ {
			for _, value := range getAllPixelsFromAllLayersForPoint(pixels, x, y, maxLayers) {
				if value == 0 || value == 1 {
					mergedLayer[pixel{x: x, y: y, layer: 0}] = value
					break
				}
				// value == 2 (transparent): keep scanning deeper layers.
			}
		}
	}
	return mergedLayer
}
// getAllPixelsFromAllLayersForPoint collects the value at one point from
// every layer, ordered top (layer 0) to bottom.
// NOTE(review): the width/height parameters actually carry x/y coordinates;
// consider renaming at the call sites.
func getAllPixelsFromAllLayersForPoint(pixels map[pixel]int, width int, height int, maxLayers int) []int {
	var stack []int
	for layer := 0; layer < maxLayers; layer++ {
		stack = append(stack, pixels[pixel{x: width, y: height, layer: layer}])
	}
	return stack
}
// printLayer renders the merged layer as text, one row per line: 0 prints as
// a filled square, 1 as an outlined square, 2 as a space.
// Improvement: string concatenation with += in a nested loop is quadratic;
// this version accumulates into a strings.Builder (output is byte-identical).
func printLayer(pixels map[pixel]int, width int, height int) string {
	var image strings.Builder
	image.Grow((width*3 + 1) * height) // the square runes are 3 UTF-8 bytes each
	for h := 0; h < height; h++ {
		for w := 0; w < width; w++ {
			switch pixels[pixel{x: w, y: h, layer: 0}] {
			case 0:
				image.WriteRune('\u25A0') // black
			case 1:
				image.WriteRune('\u25A1') // white
			case 2:
				image.WriteRune(' ') // transparent
			}
		}
		image.WriteByte('\n')
	}
	return image.String()
}
// pixel identifies one cell of the image: its column x, row y, and layer index.
type pixel struct {
	x, y, layer int
}
// readInput reads input.txt and returns its lines.
// Fixes: the old version ignored the os.Open error and never closed the file
// handle. On open failure this now returns nil (the callers already tolerate
// empty input just as they tolerated the silently-failing scanner before).
func readInput() []string {
	f, err := os.Open("input.txt")
	if err != nil {
		return nil
	}
	defer f.Close()
	var input []string
	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		input = append(input, scanner.Text())
	}
	return input
}
// stringInputToInt converts the first input line into a slice of its decimal
// digits, e.g. "120" -> [1 2 0]. A failed Atoi (non-digit rune) yields 0.
func stringInputToInt(stringInput []string) []int {
	var digits []int
	for _, ch := range strings.Split(stringInput[0], "") {
		d, _ := strconv.Atoi(ch)
		digits = append(digits, d)
	}
	return digits
}
package bls12381
import (
"errors"
"math/big"
)
// fp2Temp holds scratch field elements reused across fp2 operations to avoid
// per-call allocations. Not safe for concurrent use of a single fp2.
type fp2Temp struct {
	t [4]*fe
}

// fp2 implements arithmetic over the quadratic extension of the base field,
// with elements stored as fe2 coordinate pairs (c0, c1).
type fp2 struct {
	fp2Temp
}

// newFp2Temp allocates the four scratch elements.
func newFp2Temp() fp2Temp {
	t := [4]*fe{}
	for i := 0; i < len(t); i++ {
		t[i] = &fe{}
	}
	return fp2Temp{t}
}

// newFp2 returns a ready-to-use fp2 arithmetic context.
func newFp2() *fp2 {
	t := newFp2Temp()
	return &fp2{t}
}
// fromBytes deserializes a field element from a 2*fpByteSize (96) byte slice:
// the first fpByteSize bytes hold c1 and the rest hold c0.
// Fix: the old error message said the input "should be larger than 96 bytes",
// contradicting the exact-length check; it now states the real requirement.
func (e *fp2) fromBytes(in []byte) (*fe2, error) {
	if len(in) != 2*fpByteSize {
		return nil, errors.New("length of input string must be equal to 96 bytes")
	}
	c1, err := fromBytes(in[:fpByteSize])
	if err != nil {
		return nil, err
	}
	c0, err := fromBytes(in[fpByteSize:])
	if err != nil {
		return nil, err
	}
	return &fe2{*c0, *c1}, nil
}
// toBytes serializes a into 2*fpByteSize bytes: c1 first, then c0,
// mirroring fromBytes.
func (e *fp2) toBytes(a *fe2) []byte {
	out := make([]byte, 2*fpByteSize)
	copy(out[:fpByteSize], toBytes(&a[1]))
	copy(out[fpByteSize:], toBytes(&a[0]))
	return out
}

// new returns a freshly allocated zero element.
func (e *fp2) new() *fe2 {
	return new(fe2).zero()
}

// zero returns the additive identity as a new element.
func (e *fp2) zero() *fe2 {
	return new(fe2).zero()
}

// one returns the multiplicative identity as a new element.
func (e *fp2) one() *fe2 {
	return new(fe2).one()
}

// fromMont converts both coordinates of a out of Montgomery form into c.
func (e *fp2) fromMont(c, a *fe2) {
	// c0 = a0 / r
	// c1 = a1 / r
	fromMont(&c[0], &a[0])
	fromMont(&c[1], &a[1])
}
// add sets c = a + b, coordinate-wise.
func (e *fp2) add(c, a, b *fe2) {
	// c0 = a0 + b0
	// c1 = a1 + b1
	add(&c[0], &a[0], &b[0])
	add(&c[1], &a[1], &b[1])
}

// addAssign sets a = a + b, coordinate-wise.
func (e *fp2) addAssign(a, b *fe2) {
	// a0 = a0 + b0
	// a1 = a1 + b1
	addAssign(&a[0], &b[0])
	addAssign(&a[1], &b[1])
}

// ladd sets c = a + b using the lazy (unreduced) base-field addition.
func (e *fp2) ladd(c, a, b *fe2) {
	// c0 = a0 + b0
	// c1 = a1 + b1
	ladd(&c[0], &a[0], &b[0])
	ladd(&c[1], &a[1], &b[1])
}

// double sets c = 2a, coordinate-wise.
func (e *fp2) double(c, a *fe2) {
	// c0 = 2a0
	// c1 = 2a1
	double(&c[0], &a[0])
	double(&c[1], &a[1])
}

// doubleAssign sets a = 2a in place.
func (e *fp2) doubleAssign(a *fe2) {
	// a0 = 2a0
	// a1 = 2a1
	doubleAssign(&a[0])
	doubleAssign(&a[1])
}

// ldouble sets c = 2a using the lazy (unreduced) base-field doubling.
func (e *fp2) ldouble(c, a *fe2) {
	// c0 = 2a0
	// c1 = 2a1
	ldouble(&c[0], &a[0])
	ldouble(&c[1], &a[1])
}

// sub sets c = a - b, coordinate-wise.
func (e *fp2) sub(c, a, b *fe2) {
	// c0 = a0 - b0
	// c1 = a1 - b1
	sub(&c[0], &a[0], &b[0])
	sub(&c[1], &a[1], &b[1])
}

// subAssign sets c = c - a, coordinate-wise (assuming the base subAssign(x, y)
// computes x -= y; note the parameter naming differs from the other *Assign
// methods here).
func (e *fp2) subAssign(c, a *fe2) {
	// a0 = a0 - b0
	// a1 = a1 - b1
	subAssign(&c[0], &a[0])
	subAssign(&c[1], &a[1])
}

// neg sets c = -a, coordinate-wise.
func (e *fp2) neg(c, a *fe2) {
	// c0 = -a0
	// c1 = -a1
	neg(&c[0], &a[0])
	neg(&c[1], &a[1])
}

// conjugate sets c to the conjugate of a: c0 = a0, c1 = -a1.
func (e *fp2) conjugate(c, a *fe2) {
	// c0 = a0
	// c1 = -a1
	c[0].set(&a[0])
	neg(&c[1], &a[1])
}
// mul sets c = a * b using a Karatsuba-style schoolbook reduction
// (three base-field multiplications instead of four).
func (e *fp2) mul(c, a, b *fe2) {
	t := e.t
	// Guide to Pairing Based Cryptography
	// Algorithm 5.16
	mul(t[1], &a[0], &b[0])  // a0b0
	mul(t[2], &a[1], &b[1])  // a1b1
	ladd(t[0], &a[0], &a[1]) // a0 + a1
	ladd(t[3], &b[0], &b[1]) // b0 + b1
	sub(&c[0], t[1], t[2])   // c0 = a0b0 - a1b1
	addAssign(t[1], t[2])    // a0b0 + a1b1
	mul(t[0], t[0], t[3])    // (a0 + a1)(b0 + b1)
	sub(&c[1], t[0], t[1])   // c1 = (a0 + a1)(b0 + b1) - (a0b0 + a1b1)
}

// mulAssign sets a = a * b in place; same algorithm as mul.
// Safe because a's coordinates are fully consumed before being overwritten.
func (e *fp2) mulAssign(a, b *fe2) {
	t := e.t
	mul(t[1], &a[0], &b[0])
	mul(t[2], &a[1], &b[1])
	ladd(t[0], &a[0], &a[1])
	ladd(t[3], &b[0], &b[1])
	sub(&a[0], t[1], t[2])
	addAssign(t[1], t[2])
	mul(t[0], t[0], t[3])
	sub(&a[1], t[0], t[1])
}

// square sets c = a^2 via complex squaring:
// c0 = (a0+a1)(a0-a1), c1 = 2*a0*a1 — two multiplications only.
func (e *fp2) square(c, a *fe2) {
	t := e.t
	// Guide to Pairing Based Cryptography
	// Algorithm 5.16
	ladd(t[0], &a[0], &a[1]) // (a0 + a1)
	sub(t[1], &a[0], &a[1])  // (a0 - a1)
	ldouble(t[2], &a[0])     // 2a0
	mul(&c[0], t[0], t[1])   // c0 = (a0 + a1)(a0 - a1)
	mul(&c[1], t[2], &a[1])  // c1 = 2a0a1
}

// squareAssign sets a = a^2 in place; same algorithm as square.
// Note a[0] is overwritten only after both temporaries that need it are built.
func (e *fp2) squareAssign(a *fe2) {
	t := e.t
	ladd(t[0], &a[0], &a[1])
	sub(t[1], &a[0], &a[1])
	ldouble(t[2], &a[0])
	mul(&a[0], t[0], t[1])
	mul(&a[1], t[2], &a[1])
}
// mul0 sets c = a * b where b is a base-field element (scalar multiplication
// of both coordinates).
func (e *fp2) mul0(c, a *fe2, b *fe) {
	mul(&c[0], &a[0], b)
	mul(&c[1], &a[1], b)
}

// mulByNonResidue sets c = a multiplied by the quadratic non-residue used for
// the next extension tower: c0 = a0 - a1, c1 = a0 + a1 (i.e. multiplication
// by 1 + u, presuming u^2 = -1 in this representation).
func (e *fp2) mulByNonResidue(c, a *fe2) {
	t := e.t
	// c0 = (a0 - a1)
	// c1 = (a0 + a1)
	sub(t[0], &a[0], &a[1])
	add(&c[1], &a[0], &a[1])
	c[0].set(t[0])
}

// mulByB sets c = a * 4 * (1 + u): c0 = 4a0 - 4a1, c1 = 4a0 + 4a1
// (used for the curve coefficient; the factor 4 comes from two doublings).
func (e *fp2) mulByB(c, a *fe2) {
	t := e.t
	// c0 = 4a0 - 4a1
	// c1 = 4a0 + 4a1
	double(t[0], &a[0])
	doubleAssign(t[0])
	double(t[1], &a[1])
	doubleAssign(t[1])
	sub(&c[0], t[0], t[1])
	add(&c[1], t[0], t[1])
}
// inverse sets c = a^-1 via the norm: 1/(a0 + a1 u) =
// (a0 - a1 u) / (a0^2 + a1^2). Behavior for a = 0 is whatever the base-field
// inverse does with a zero norm.
func (e *fp2) inverse(c, a *fe2) {
	t := e.t
	// Guide to Pairing Based Cryptography
	// Algorithm 5.16
	square(t[0], &a[0])     // a0^2
	square(t[1], &a[1])     // a1^2
	addAssign(t[0], t[1])   // a0^2 + a1^2
	inverse(t[0], t[0])     // (a0^2 + a1^2)^-1
	mul(&c[0], &a[0], t[0]) // c0 = a0(a0^2 + a1^2)^-1
	mul(t[0], t[0], &a[1])  // a1(a0^2 + a1^2)^-1
	neg(&c[1], t[0])        // c1 = a1(a0^2 + a1^2)^-1
}
// inverseBatch inverts every non-zero element of in, in place, using
// Montgomery's batch-inversion trick: build the running products
// a, ab, abc, ..., invert only the final product, then peel the individual
// inverses back out. Zero elements are skipped and left untouched.
// Costs one field inversion plus 3(n-1) multiplications for n non-zero inputs.
func (e *fp2) inverseBatch(in []fe2) {
	n, N, setFirst := 0, len(in), false
	for i := 0; i < len(in); i++ {
		if !in[i].isZero() {
			n++
		}
	}
	tA := make([]fe2, n) // forward prefix products of the non-zero inputs
	tB := make([]fe2, n) // suffix products of the inverse
	// a, ab, abc, abcd, ...
	for i, j := 0, 0; i < N; i++ {
		if !in[i].isZero() {
			if !setFirst {
				setFirst = true
				tA[j].set(&in[i])
			} else {
				e.mul(&tA[j], &in[i], &tA[j-1])
			}
			j = j + 1
		}
	}
	// (abcd...)^-1
	e.inverse(&tB[n-1], &tA[n-1])
	// a^-1, ab^-1, abc^-1, abcd^-1, ...
	for i, j := N-1, n-1; j != 0; i-- {
		if !in[i].isZero() {
			e.mul(&tB[j-1], &tB[j], &in[i])
			j = j - 1
		}
	}
	// a^-1, b^-1, c^-1, d^-1
	for i, j := 0, 0; i < N; i++ {
		if !in[i].isZero() {
			if setFirst {
				setFirst = false
				in[i].set(&tB[j])
			} else {
				e.mul(&in[i], &tA[j-1], &tB[j])
			}
			j = j + 1
		}
	}
}
// exp computes c = a^s by left-to-right binary square-and-multiply.
// Running time depends on s.BitLen() and its bit pattern; not constant-time.
func (e *fp2) exp(c, a *fe2, s *big.Int) {
	z := e.one()
	for i := s.BitLen() - 1; i >= 0; i-- {
		e.square(z, z)
		if s.Bit(i) == 1 {
			e.mul(z, z, a)
		}
	}
	c.set(z)
}

// frobeniusMap1 applies the Frobenius endomorphism once (a -> a^p),
// which on Fp2 is conjugation.
func (e *fp2) frobeniusMap1(a *fe2) {
	e.conjugate(a, a)
}

// frobeniusMap applies the Frobenius endomorphism power times. The map has
// order 2 on Fp2, so only the parity of power matters.
func (e *fp2) frobeniusMap(a *fe2, power int) {
	if power&1 == 1 {
		e.conjugate(a, a)
	}
}
// sqrt attempts to compute a square root of a, writing the candidate to c
// and reporting whether a is a quadratic residue. It exponentiates by the
// precomputed constants pMinus3Over4 and pMinus1Over2 (declared elsewhere);
// this matches the shape of the standard square-root algorithm for
// extension fields with p ≡ 3 (mod 4) — TODO confirm against the constants.
// Note: c may be left modified even when false is returned.
func (e *fp2) sqrt(c, a *fe2) bool {
	u, x0, a1, alpha := &fe2{}, &fe2{}, &fe2{}, &fe2{}
	u.set(a) // keep the original input; c may alias a
	e.exp(a1, a, pMinus3Over4)
	e.square(alpha, a1)
	e.mul(alpha, alpha, a)
	e.mul(x0, a1, a)
	if alpha.equal(negativeOne2) {
		// c = x0 * u: (x0_0 + x0_1*u)*u = -x0_1 + x0_0*u.
		neg(&c[0], &x0[1])
		c[1].set(&x0[0])
		return true
	}
	e.add(alpha, alpha, e.one())
	e.exp(alpha, alpha, pMinus1Over2)
	e.mul(c, alpha, x0)
	// Verify: c is a valid square root iff c^2 equals the original input.
	e.square(alpha, c)
	return alpha.equal(u)
}
// isQuadraticNonResidue reports whether a is a non-square in Fp2 by testing
// whether its norm a0^2 + a1^2 is a non-square in the base field Fp
// (delegating to the base-field isQuadraticNonResidue).
func (e *fp2) isQuadraticNonResidue(a *fe2) bool {
	c0, c1 := new(fe), new(fe)
	square(c0, &a[0])
	square(c1, &a[1])
	add(c1, c1, c0)
	return isQuadraticNonResidue(c1)
}
package tmxscripter
import (
"github.com/kurrik/tmxgo"
)
// ScriptableMap wraps a tmxgo.Map to expose a script-friendly API.
type ScriptableMap struct {
	*tmxgo.Map
}

// NewScriptableMap wraps m in a ScriptableMap; the map is shared, not copied.
func NewScriptableMap(m *tmxgo.Map) *ScriptableMap {
	return &ScriptableMap{
		Map: m,
	}
}

// Returns a layer with the given name if one exists.
// Returns nil when no such layer exists (the lookup error is discarded).
func (m *ScriptableMap) GetLayer(name string) *ScriptableLayer {
	if l, err := m.LayerByName(name); err != nil {
		return nil
	} else {
		return NewScriptableLayer(l)
	}
}
// Adds a new layer with the given name to the map. Tile IDs will be 0.
// The new layer has the same dimensions as the map and is appended to the
// end of the map's layer list.
func (m *ScriptableMap) AddLayer(name string) *ScriptableLayer {
	// Build an all-zero tile grid the same size as the map,
	// indexed grid.Tiles[x][y].
	var grid = tmxgo.DataTileGrid{
		Width:  int(m.Width),
		Height: int(m.Height),
		Tiles:  make([][]tmxgo.DataTileGridTile, m.Width),
	}
	for x := 0; x < int(m.Width); x++ {
		grid.Tiles[x] = make([]tmxgo.DataTileGridTile, m.Height)
		for y := 0; y < int(m.Height); y++ {
			grid.Tiles[x][y] = tmxgo.DataTileGridTile{
				Id:    0,
				FlipX: false,
				FlipY: false,
				FlipD: false,
			}
		}
	}
	var layer = &tmxgo.Layer{
		Name:   name,
		Width:  m.Width,
		Height: m.Height,
		Data:   &tmxgo.Data{},
	}
	layer.SetGrid(grid)
	m.Layers = append(m.Layers, layer)
	return NewScriptableLayer(layer)
}
// ScriptableLayer wraps a tmxgo.Layer for scripting.
type ScriptableLayer struct {
	*tmxgo.Layer
}

// NewScriptableLayer wraps l in a ScriptableLayer; the layer is shared.
func NewScriptableLayer(l *tmxgo.Layer) *ScriptableLayer {
	return &ScriptableLayer{
		Layer: l,
	}
}

// Returns a scriptable grid for this layer.
func (l *ScriptableLayer) GetGrid() *ScriptableGrid {
	return NewScriptableGrid(l.Layer)
}

// ScriptableGrid pairs a decoded tile grid with the layer it came from so
// that edits can be written back via Save.
type ScriptableGrid struct {
	*tmxgo.DataTileGrid
	*tmxgo.Layer
}

// NewScriptableGrid decodes l's grid. Returns nil when decoding fails
// (the error is discarded).
func NewScriptableGrid(l *tmxgo.Layer) *ScriptableGrid {
	if g, err := l.GetGrid(); err != nil {
		return nil
	} else {
		return &ScriptableGrid{
			DataTileGrid: &g,
			Layer:        l,
		}
	}
}

// Returns the width of the grid in tiles.
func (g *ScriptableGrid) Width() int {
	return g.DataTileGrid.Width
}

// Returns the height of the grid in tiles.
func (g *ScriptableGrid) Height() int {
	return g.DataTileGrid.Height
}

// Saves the grid back into the layer.
func (g *ScriptableGrid) Save() {
	g.Layer.SetGrid(*g.DataTileGrid)
}

// Returns the tile at the specified location.
// x is the column and y is the row; no bounds checking is performed.
func (g *ScriptableGrid) TileAt(x int, y int) *ScriptableTile {
	return NewScriptableTile(&g.Tiles[x][y])
}

// Returns a linear array of tiles.
// Tiles are laid out row-major (index = y*Width + x) and point into the
// grid, so mutating a returned tile mutates the grid.
func (g *ScriptableGrid) TileList() []*ScriptableTile {
	var tiles = make([]*ScriptableTile, g.DataTileGrid.Width*g.DataTileGrid.Height)
	for y := 0; y < g.DataTileGrid.Height; y++ {
		for x := 0; x < g.DataTileGrid.Width; x++ {
			tiles[y*g.DataTileGrid.Width+x] = NewScriptableTile(&g.Tiles[x][y])
		}
	}
	return tiles
}
// Represents a tile object. Has Id, FlipX, FlipY and FlipD attributes.
type ScriptableTile struct {
	*tmxgo.DataTileGridTile
}

// NewScriptableTile wraps t; the underlying tile is shared, not copied.
func NewScriptableTile(t *tmxgo.DataTileGridTile) *ScriptableTile {
	return &ScriptableTile{
		DataTileGridTile: t,
	}
}
package splunk
// nolint: dupl
// LogEntryRingBuffer is a ring buffer that supports inserting and reading
// chunks of elements in an orderly fashion. It is NOT thread-safe and the
// returned batches are not copied, they are a slice against the original
// backing array of this logEntry. This means that if the buffer wraps around,
// elements in the slice returned by NextBatch will be changed, and you are
// subject to all of the rules of Go's memory model if accessing the data in a
// separate goroutine.
type LogEntryRingBuffer struct {
	// The main buffer
	buffer []logEntry
	// Store length explicitly as optimization
	bufferLen int
	// Index that the next Add will write to.
	nextIdx int
	// How many times around the ring buffer we have gone when putting
	// datapoints onto the buffer
	writtenCircuits int64
	// The index that indicates the last read position in the buffer. It is
	// one greater than the actual index, to match the golang slice high range.
	readHigh int
	// How many elements are in the buffer on which processing has not begun.
	// This could be calculated from readHigh and nextIdx on demand, but
	// precalculate it in Add and NextBatch since it tends to get read often.
	// Also by precalculating it, we can tell if the buffer was completely
	// overwritten since the last read.
	unprocessed int
}
// NewLogEntryRingBuffer creates a new initialized buffer ready for use.
func NewLogEntryRingBuffer(size int) *LogEntryRingBuffer {
	return &LogEntryRingBuffer{
		// Preallocate the buffer to its maximum length
		buffer:    make([]logEntry, size),
		bufferLen: size,
	}
}

// Add a logEntry to the buffer. It will overwrite any existing element in the
// buffer as the buffer wraps around. Returns whether the new element
// overwrites an uncommitted element already in the buffer.
func (b *LogEntryRingBuffer) Add(inst logEntry) (isOverwrite bool) {
	if b.unprocessed >= b.bufferLen {
		isOverwrite = true
		// Drag the read cursor along with the overwritten elements
		b.readHigh++
		if b.readHigh > b.bufferLen {
			// Wrap around to cover the 0th element of the buffer
			b.readHigh = 1
		}
	} else {
		b.unprocessed++
	}

	b.buffer[b.nextIdx] = inst
	b.nextIdx++
	if b.nextIdx == b.bufferLen { // Wrap around the buffer
		b.nextIdx = 0
		b.writtenCircuits++
	}
	return isOverwrite
}

// Size returns how many elements can fit in the buffer at once.
func (b *LogEntryRingBuffer) Size() int {
	return b.bufferLen
}

// UnprocessedCount returns the number of elements that have been written to
// the buffer but not read via NextBatch.
func (b *LogEntryRingBuffer) UnprocessedCount() int {
	return b.unprocessed
}
// NextBatch returns the next batch of unprocessed elements. If there are
// none, this can return nil.
// The returned slice aliases the internal buffer (see the type comment) and
// holds at most maxSize elements; a wrap-around may cut a batch short, so
// callers should keep calling until UnprocessedCount reaches zero.
func (b *LogEntryRingBuffer) NextBatch(maxSize int) []logEntry {
	prevReadHigh := b.readHigh
	if prevReadHigh == b.bufferLen {
		// Wrap around
		prevReadHigh = 0
	}
	if b.unprocessed == 0 {
		return nil
	}
	targetSize := b.unprocessed
	if targetSize > maxSize {
		targetSize = maxSize
	}
	b.readHigh = prevReadHigh + targetSize
	if b.readHigh > b.bufferLen {
		// Wrap around happened, just take what we have left until wrap around
		// so that we can take a single slice of it since slice ranges can't
		// wrap around.
		b.readHigh = b.bufferLen
	}
	b.unprocessed -= b.readHigh - prevReadHigh
	out := b.buffer[prevReadHigh:b.readHigh]
	return out
}
package mos65xx
import (
"fmt"
)
// ANSI-coloured markers used when printing condition results.
const (
	condPass  = " \x1b[1;32m✓\x1b[0m "
	condFail  = " \x1b[1;31m✗\x1b[0m "
	condEqual = "\x1b[1;37m=\x1b[0m"
	condRange = "\x1b[1;37m∈\x1b[0m"
)

// cond is a condition checker for our test harness
type cond interface {
	Cond(Instruction) bool
	String() string
}

// conds are multiple conditions combined
type conds struct {
	Any   bool   // Any condition that returns true will make this pass
	Conds []cond // conditions

	res      []bool
	met, not []cond
}

// Cond evaluates every sub-condition against in, recording which passed
// (met) and which failed (not) for later reporting via Reason/Print.
func (t *conds) Cond(in Instruction) bool {
	// Each test resets the conditions
	t.res = make([]bool, len(t.Conds))
	t.met = make([]cond, 0, len(t.Conds))
	t.not = make([]cond, 0, len(t.Conds))

	// Check conditions
	for i, c := range t.Conds {
		if t.res[i] = c.Cond(in); t.res[i] {
			t.met = append(t.met, c)
		} else {
			t.not = append(t.not, c)
		}
	}

	if t.Any {
		// Any condition has to be met
		return len(t.met) > 0
	}

	// All conditions have to be met
	return len(t.met) == len(t.Conds)
}

// Reason (only valid for Any mode)
// Returns the description of the first met condition, or "" if none passed.
func (t *conds) Reason() string {
	if len(t.met) > 0 {
		return t.met[0].String()
	}
	return ""
}
// Print reports the met and unmet conditions of the last Cond call,
// one line per condition, through the supplied output function f.
func (t *conds) Print(f func(string)) {
	if len(t.met) == 0 {
		f(" no conditions met")
	} else {
		f(" conditions met:")
		for _, c := range t.met {
			f(condPass + c.String())
		}
	}
	if len(t.not) == 0 {
		f(" no conditions unmet")
	} else {
		f(" conditions unmet:")
		for _, c := range t.not {
			f(condFail + c.String())
		}
	}
}

// Register value conditions
// Each condition passes when the named CPU register equals the expected value.
type (
	condPC uint16
	condP  uint8
	condS  uint8
	condA  uint8
	condX  uint8
	condY  uint8
)

func (t condPC) Cond(in Instruction) bool { return in.CPU.Registers().PC == uint16(t) }
func (t condPC) String() string           { return fmt.Sprintf("PC %s $%04X", condEqual, uint16(t)) }
func (t condP) Cond(in Instruction) bool  { return in.CPU.Registers().P == uint8(t) }
func (t condP) String() string            { return fmt.Sprintf("P %s $%02X", condEqual, uint8(t)) }
func (t condS) Cond(in Instruction) bool  { return in.CPU.Registers().S == uint8(t) }
func (t condS) String() string            { return fmt.Sprintf("S %s $%02X", condEqual, uint8(t)) }
func (t condA) Cond(in Instruction) bool  { return in.CPU.Registers().A == uint8(t) }
func (t condA) String() string            { return fmt.Sprintf("A %s $%02X", condEqual, uint8(t)) }
func (t condX) Cond(in Instruction) bool  { return in.CPU.Registers().X == uint8(t) }
func (t condX) String() string            { return fmt.Sprintf("X %s $%02X", condEqual, uint8(t)) }
func (t condY) Cond(in Instruction) bool  { return in.CPU.Registers().Y == uint8(t) }
func (t condY) String() string            { return fmt.Sprintf("Y %s $%02X", condEqual, uint8(t)) }

// condCycles are conditional cycle boundaries
// When t[1] <= t[0] only the lower bound t[0] is enforced; otherwise the
// cycle count must fall within the inclusive range [t[0], t[1]].
type condCycles [2]int

func (t condCycles) Cond(in Instruction) bool {
	if t[0] == t[1] || t[1] < t[0] {
		return in.Cycles >= t[0]
	}
	return in.Cycles >= t[0] && in.Cycles <= t[1]
}

func (t condCycles) String() string {
	if t[0] == t[1] || t[1] < t[0] {
		return fmt.Sprintf("cycles %s %d", condEqual, t[0])
	}
	return fmt.Sprintf("cycles %s [%d, %d]", condRange, t[0], t[1])
}
// condOp is a condition for hitting a mnemonic
type condOp Mnemonic

func (t condOp) Cond(in Instruction) bool {
	return in.Mnemonic == Mnemonic(t)
}

func (t condOp) String() string {
	return fmt.Sprintf("opcode %s $%02X (%s)", condEqual, uint8(t), Mnemonic(t))
}

// condByte is a condition for the value of byte at Addr
type condByte struct {
	Addr  uint16 // Address of the byte
	Value uint8  // Value for comparison
}

func (t condByte) Cond(in Instruction) bool {
	return in.CPU.Fetch(t.Addr) == t.Value
}

func (t condByte) String() string {
	return fmt.Sprintf("$%04X %s $%02X", t.Addr, condEqual, t.Value)
}

// condTrap is a condition for looping jumps
// It fires when a JMP/JSR targets its own address, i.e. the instruction
// jumps to itself in an infinite loop (indirect jumps are resolved first).
type condTrap struct{}

func (t condTrap) Cond(in Instruction) bool {
	switch in.Mnemonic {
	case JMP, JSR:
		addr := in.Addr()
		if in.AddressMode == Indirect {
			addr = FetchWord(in.CPU, addr)
		}
		return in.Registers.PC == addr
	default:
		return false
	}
}

func (t condTrap) String() string {
	return "PC trapped"
}

// condStack compiles a slice of condByte to match data in the stack
// The bytes are matched against the len(stack) addresses just below $0200
// (presumably the top of the 6502 stack page — confirm against the harness).
func condStack(stack ...uint8) []cond {
	var (
		l = uint16(len(stack))
		c = make([]cond, l)
	)
	for i, b := range stack {
		c[i] = condByte{0x0200 - l + uint16(i), b}
	}
	return c
}
// condString compiles conditions asserting that the bytes starting at addr
// hold the given string, one condByte per character.
func condString(addr uint16, value string) []cond {
	var (
		l = uint16(len(value))
		c = make([]cond, l)
	)
	for i, b := range []byte(value) {
		c[i] = condByte{addr + uint16(i), b}
	}
	return c
}
package matrix
// Matrix : a matrix with Row rows and Col columns, stored row-major in
// Values (element (r, c) lives at Values[r*Col+c]).
type Matrix struct {
	Row, Col int
	Values   []float64
}
// ScalAdd : return a matrix by adding scal element wise
// The input matrix is not modified.
func ScalAdd(m *Matrix, scal float64) *Matrix {
	values := make([]float64, m.Col*m.Row)
	for i := 0; i < m.Row*m.Col; i++ {
		values[i] = m.Values[i] + scal
	}
	return &Matrix{Row: m.Row, Col: m.Col, Values: values}
}

// Add : return a matrix from the addition of two matrixes element wise
// Assumes m1 and m2 have the same shape; no dimension check is performed.
func Add(m1 *Matrix, m2 *Matrix) *Matrix {
	values := make([]float64, m1.Col*m1.Row)
	for i := 0; i < m1.Row*m1.Col; i++ {
		val1 := m1.Values[i]
		val2 := m2.Values[i]
		val := val1 + val2
		values[i] = val
	}
	return &Matrix{Row: m1.Row, Col: m1.Col, Values: values}
}

// Sub : return a matrix from the substraction of two matrixes element wise
// Assumes m1 and m2 have the same shape; no dimension check is performed.
func Sub(m1 *Matrix, m2 *Matrix) *Matrix {
	values := make([]float64, m1.Col*m1.Row)
	for i := 0; i < m1.Row*m1.Col; i++ {
		val1 := m1.Values[i]
		val2 := m2.Values[i]
		val := val1 - val2
		values[i] = val
	}
	return &Matrix{Row: m1.Row, Col: m1.Col, Values: values}
}

// ScalMult : return a matrix by multiplying element wise by scal
// The input matrix is not modified.
func ScalMult(m *Matrix, scal float64) *Matrix {
	values := make([]float64, m.Col*m.Row)
	for i := 0; i < m.Row*m.Col; i++ {
		values[i] = m.Values[i] * scal
	}
	return &Matrix{Row: m.Row, Col: m.Col, Values: values}
}
// Transpose : return a matrix by transposition
//
// The result has m.Col rows and m.Row columns, with result(i, j) = m(j, i).
// The previous implementation indexed with the wrong strides
// (values[i*m.Col+j] = m.Values[j*m.Row+i]) and produced an incorrect
// layout for any non-square matrix; it was only correct when Row == Col.
func Transpose(m *Matrix) *Matrix {
	values := make([]float64, m.Col*m.Row)
	for i := 0; i < m.Col; i++ { // row index of the result
		for j := 0; j < m.Row; j++ { // column index of the result
			// Both sides use row-major strides of their own matrix.
			values[i*m.Row+j] = m.Values[j*m.Col+i]
		}
	}
	return &Matrix{Row: m.Col, Col: m.Row, Values: values}
}
// Mult : return a matrix from the multiplication of two matrixes element wise
// (Hadamard product). Assumes m1 and m2 have the same shape.
func Mult(m1 *Matrix, m2 *Matrix) *Matrix {
	values := make([]float64, m1.Col*m1.Row)
	for i := 0; i < m1.Row*m1.Col; i++ {
		val1 := m1.Values[i]
		val2 := m2.Values[i]
		val := val1 * val2
		values[i] = val
	}
	return &Matrix{Row: m1.Row, Col: m1.Col, Values: values}
}

// Dot : return a matrix by multiplying two Matrixes
// Standard matrix product; assumes m1.Col == m2.Row (no dimension check).
func Dot(m1 *Matrix, m2 *Matrix) *Matrix {
	values := make([]float64, m1.Row*m2.Col)
	for i := 0; i < m1.Row; i++ {
		for j := 0; j < m2.Col; j++ {
			for k := 0; k < m1.Col; k++ {
				values[i*m2.Col+j] += m1.Values[i*m1.Col+k] * m2.Values[k*m2.Col+j]
			}
		}
	}
	return &Matrix{Row: m1.Row, Col: m2.Col, Values: values}
}

// Trace : return the trace of a square matrix
// Sums the main diagonal; only meaningful when Row == Col.
func Trace(m *Matrix) float64 {
	value := float64(0)
	for i := 0; i < m.Row; i++ {
		value += m.Values[i*m.Col+i]
	}
	return value
}
// Apply : return a matrix by applying function element wise
// The input matrix is not modified; fn is called once per element.
func Apply(m *Matrix, fn func(v float64) float64) *Matrix {
	values := make([]float64, m.Col*m.Row)
	for i := 0; i < m.Row*m.Col; i++ {
		values[i] = fn(m.Values[i])
	}
	return &Matrix{Row: m.Row, Col: m.Col, Values: values}
}
package forGraphBLASGo
import (
"github.com/intel/forGoParallel/pipeline"
)
// mxV represents the (lazy) matrix-vector product of A and u under the
// semiring op; output elements are computed on demand.
type mxV[Dw, DA, Du any] struct {
	op Semiring[Dw, DA, Du]
	A  *matrixReference[DA]
	u  *vectorReference[Du]
}

// newMxV builds the computed-vector node for op, A and u.
func newMxV[Dw, DA, Du any](
	op Semiring[Dw, DA, Du],
	A *matrixReference[DA],
	u *vectorReference[Du],
) computeVectorT[Dw] {
	return mxV[Dw, DA, Du]{
		op: op,
		A:  A,
		u:  u,
	}
}

// resize returns the same computation with A's row count changed to
// newSize; the column count and the input vector u are left unchanged.
func (compute mxV[Dw, DA, Du]) resize(newSize int) computeVectorT[Dw] {
	_, ncols := compute.A.size()
	A := compute.A.resize(newSize, ncols)
	return newMxV[Dw, DA, Du](compute.op, A, compute.u)
}

// computeElement computes output element `index` as the semiring-addition
// reduction of the elementwise products of row `index` of A with u.
// ok is false when the row (or u) yields no pipeline, or when no position
// is stored in both operands.
func (compute mxV[Dw, DA, Du]) computeElement(index int) (result Dw, ok bool) {
	add := compute.op.addition().operator()
	mult := compute.op.multiplication()
	ap := compute.A.getRowPipeline(index)
	if ap == nil {
		return
	}
	up := compute.u.getPipeline()
	if up == nil {
		return
	}
	return vectorPipelineReduce(makeVector2SourcePipeline(ap, up,
		func(index int, aValue DA, aok bool, uValue Du, uok bool) (result Dw, ok bool) {
			if aok && uok {
				return mult(aValue, uValue), true
			}
			return
		}), add)
}
// computePipeline builds a pipeline producing the sparse result vector:
// one (index, value) pair per matrix row whose product with u reduces to a
// stored value. Returns nil when u has no stored values (empty product).
func (compute mxV[Dw, DA, Du]) computePipeline() *pipeline.Pipeline[any] {
	add := compute.op.addition().operator()
	mult := compute.op.multiplication()
	// Kick off optimization of u concurrently — assumes vectorReference is
	// safe for concurrent use; TODO confirm.
	go compute.u.optimize()
	if compute.u.nvals() == 0 {
		return nil
	}
	rowPipelines := compute.A.getRowPipelines()
	var p pipeline.Pipeline[any]
	p.Source(pipeline.NewFunc[any](-1, func(size int) (data any, fetched int, err error) {
		var result vectorSlice[Dw]
		// Consume rows until the requested batch size is filled or rows
		// run out; rows whose reduction yields no value are skipped.
		for fetched < size && len(rowPipelines) > 0 {
			value, ok := vectorPipelineReduce(makeVector2SourcePipeline(rowPipelines[0].p, compute.u.getPipeline(),
				func(index int, aValue DA, aok bool, uValue Du, uok bool) (result Dw, ok bool) {
					if aok && uok {
						return mult(aValue, uValue), true
					}
					return
				},
			), add)
			if ok {
				result.indices = append(result.indices, rowPipelines[0].index)
				result.values = append(result.values, value)
				fetched++
			}
			// Drop the reference to the consumed row pipeline before
			// advancing so it can be collected.
			rowPipelines[0].p = nil
			rowPipelines = rowPipelines[1:]
		}
		return result, fetched, nil
	}))
	return &p
}
package promql
import (
"fmt"
"regexp"
"time"
"github.com/wolffcm/flux/ast"
"github.com/influxdata/promql/v2"
"github.com/influxdata/promql/v2/pkg/labels"
)
// labelMatchOps maps PromQL label-match types to the Flux comparison
// operators used when filtering rows.
var labelMatchOps = map[labels.MatchType]ast.OperatorKind{
	labels.MatchEqual:     ast.EqualOperator,
	labels.MatchNotEqual:  ast.NotEqualOperator,
	labels.MatchRegexp:    ast.RegexpMatchOperator,
	labels.MatchNotRegexp: ast.NotRegexpMatchOperator,
}

// transpileLabelMatchersFn wraps the combined matcher expression into a
// Flux predicate of the form (r) => <matchers>.
func transpileLabelMatchersFn(lms []*labels.Matcher) *ast.FunctionExpression {
	return &ast.FunctionExpression{
		Params: []*ast.Property{
			{
				Key: &ast.Identifier{Name: "r"},
			},
		},
		Body: transpileLabelMatchers(lms),
	}
}

// transpileLabelMatchers AND-combines all matchers into one expression.
// Panics on an empty slice; callers must supply at least one matcher.
func transpileLabelMatchers(lms []*labels.Matcher) ast.Expression {
	if len(lms) == 0 {
		panic("empty label matchers")
	}
	if len(lms) == 1 {
		return transpileLabelMatcher(lms[0])
	}
	return &ast.LogicalExpression{
		Operator: ast.AndOperator,
		Left:     transpileLabelMatcher(lms[0]),
		// Recurse until we have all label matchers AND-ed together in a right-heavy tree.
		Right: transpileLabelMatchers(lms[1:]),
	}
}

// transpileLabelMatcher converts a single matcher into a binary expression
// comparing the row's label column (r.<name>) against a string or regex
// literal. Panics on an unknown matcher type.
func transpileLabelMatcher(lm *labels.Matcher) *ast.BinaryExpression {
	op, ok := labelMatchOps[lm.Type]
	if !ok {
		panic(fmt.Errorf("invalid label matcher type %v", lm.Type))
	}
	be := &ast.BinaryExpression{
		Operator: op,
		Left:     member("r", lm.Name),
	}
	if op == ast.EqualOperator || op == ast.NotEqualOperator {
		be.Right = &ast.StringLiteral{Value: lm.Value}
	} else {
		// PromQL parsing already validates regexes.
		// PromQL regexes are always full-string matches / fully anchored.
		be.Right = &ast.RegexpLiteral{Value: regexp.MustCompile("^(?:" + lm.Value + ")$")}
	}
	return be
}
// dropMeasurementCall is a Flux drop() call that removes the
// InfluxDB-specific "_measurement" column, which has no PromQL equivalent.
var dropMeasurementCall = call(
	"drop",
	map[string]ast.Expression{
		"columns": &ast.ArrayExpression{
			Elements: []ast.Expression{
				&ast.StringLiteral{Value: "_measurement"},
			},
		},
	},
)
// transpileInstantVectorSelector converts a PromQL instant vector selector
// into a Flux pipeline: from |> range |> filter |> [window |> filter] |>
// last |> timeShift |> drop. The window stages are only emitted for range
// queries (Resolution > 0); the 5m period mirrors PromQL's lookback delta.
func (t *Transpiler) transpileInstantVectorSelector(v *promql.VectorSelector) *ast.PipeExpression {
	var windowCall *ast.CallExpression
	var windowFilterCall *ast.CallExpression
	if t.Resolution > 0 {
		// For range queries:
		// At every resolution step, load / look back up to 5m of data (PromQL lookback delta).
		windowCall = call("window", map[string]ast.Expression{
			"every":  &ast.DurationLiteral{Values: []ast.Duration{{Magnitude: t.Resolution.Nanoseconds(), Unit: "ns"}}},
			"period": &ast.DurationLiteral{Values: []ast.Duration{{Magnitude: 5, Unit: "m"}}},
			"offset": &ast.DurationLiteral{Values: []ast.Duration{{Magnitude: t.Start.Add(-v.Offset).UnixNano() % t.Resolution.Nanoseconds(), Unit: "ns"}}},
		})

		// Remove any windows <5m long at the edges of the graph range to act like PromQL.
		windowFilterCall = call("filter", map[string]ast.Expression{"fn": windowCutoffFn(t.Start.Add(-v.Offset), t.End.Add(-5*time.Minute-v.Offset))})
	}

	return buildPipeline(
		// Select all Prometheus data.
		call("from", map[string]ast.Expression{"bucket": &ast.StringLiteral{Value: t.Bucket}}),
		// Query entire graph range.
		call("range", map[string]ast.Expression{
			"start": &ast.DateTimeLiteral{Value: t.Start.Add(-5*time.Minute - v.Offset)},
			"stop":  &ast.DateTimeLiteral{Value: t.End.Add(-v.Offset)},
		}),
		// Apply label matching filters.
		call("filter", map[string]ast.Expression{"fn": transpileLabelMatchersFn(v.LabelMatchers)}),
		windowCall,
		windowFilterCall,
		// Select the last data point after the current evaluation (resolution step) timestamp.
		call("last", nil),
		// Apply offsets to make past data look like it's in the present.
		call("timeShift", map[string]ast.Expression{
			"duration": &ast.DurationLiteral{Values: []ast.Duration{{Magnitude: v.Offset.Nanoseconds(), Unit: "ns"}}},
		}),
		dropMeasurementCall,
	)
}
// transpileRangeVectorSelector converts a PromQL range vector selector
// (e.g. metric[5m]) into a Flux pipeline. Unlike the instant selector, the
// window period and the range start are derived from the selector's Range
// rather than the fixed 5m lookback, and no last() stage is applied.
func (t *Transpiler) transpileRangeVectorSelector(v *promql.MatrixSelector) *ast.PipeExpression {
	var windowCall *ast.CallExpression
	var windowFilterCall *ast.CallExpression
	if t.Resolution > 0 {
		// For range queries:
		// At every resolution step, include the specified range of data.
		windowCall = call("window", map[string]ast.Expression{
			"every":  &ast.DurationLiteral{Values: []ast.Duration{{Magnitude: t.Resolution.Nanoseconds(), Unit: "ns"}}},
			"period": &ast.DurationLiteral{Values: []ast.Duration{{Magnitude: v.Range.Nanoseconds(), Unit: "ns"}}},
			"offset": &ast.DurationLiteral{Values: []ast.Duration{{Magnitude: t.Start.UnixNano() % t.Resolution.Nanoseconds(), Unit: "ns"}}},
		})

		// Remove any windows smaller than the specified range at the edges of the graph range.
		windowFilterCall = call("filter", map[string]ast.Expression{"fn": windowCutoffFn(t.Start.Add(-v.Offset), t.End.Add(-v.Range-v.Offset))})
	}

	return buildPipeline(
		// Select all Prometheus data.
		call("from", map[string]ast.Expression{"bucket": &ast.StringLiteral{Value: t.Bucket}}),
		// Query entire graph range.
		call("range", map[string]ast.Expression{
			"start": &ast.DateTimeLiteral{Value: t.Start.Add(-v.Range - v.Offset)},
			"stop":  &ast.DateTimeLiteral{Value: t.End.Add(-v.Offset)},
		}),
		// Apply label matching filters.
		call("filter", map[string]ast.Expression{"fn": transpileLabelMatchersFn(v.LabelMatchers)}),
		windowCall,
		windowFilterCall,
		// Apply offsets to make past data look like it's in the present.
		call("timeShift", map[string]ast.Expression{
			"duration": &ast.DurationLiteral{Values: []ast.Duration{{Magnitude: v.Offset.Nanoseconds(), Unit: "ns"}}},
		}),
		dropMeasurementCall,
	)
}
package goalgorithms
// mergeTopDown sorts the size elements of a starting at index i: it
// recursively sorts the two halves (the left half takes the extra element
// when size is odd) and then merges them through the scratch slice b.
func mergeTopDown(a []int, b []int, i, size int) {
	half := size/2 + size%2
	l, r := i, i+half
	lEnd, rEnd := r, i+size
	if half > 1 {
		mergeTopDown(a, b, l, half)
	}
	if size-half > 1 {
		mergeTopDown(a, b, r, size-half)
	}
	// Stable merge of a[l:lEnd] and a[r:rEnd] into b[0:size]; ties take
	// the element from the left run first.
	for z := 0; z < size; z++ {
		if l < lEnd && (r == rEnd || a[l] <= a[r]) {
			b[z] = a[l]
			l++
		} else {
			b[z] = a[r]
			r++
		}
	}
	copy(a[i:i+size], b[:size])
}

// MergeSortTopDown performs in-place sort of int slice in ascending order.
func MergeSortTopDown(a []int) {
	scratch := make([]int, len(a))
	mergeTopDown(a, scratch, 0, len(a))
}
// mergeTopDown2 recursively sorts a[left:right], merging through
// b[0:right-left]. The scratch is always indexed from 0, regardless of the
// window position.
func mergeTopDown2(a []int, b []int, left, right int) {
	middle := left + ((right - left) / 2)
	if middle-left > 1 {
		mergeTopDown2(a, b, left, middle)
	}
	if right-middle > 1 {
		mergeTopDown2(a, b, middle, right)
	}
	s := right - left
	l := left
	r := middle
	z := 0
	// Stable merge: ties prefer the left run.
	for z < s {
		if l == middle {
			b[z] = a[r]
			r++
		} else if r == right {
			b[z] = a[l]
			l++
		} else if a[l] <= a[r] {
			b[z] = a[l]
			l++
		} else {
			b[z] = a[r]
			r++
		}
		z++
	}
	// Copy the merged run back into place.
	for s := 0; s < z; s++ {
		a[left+s] = b[s]
	}
}

// MergeSortTopDown2 performs in-place sort of int slice in ascending order.
func MergeSortTopDown2(a []int) {
	b := make([]int, len(a), len(a))
	mergeTopDown2(a, b, 0, len(a))
}
// mergeTopDown3 recursively sorts a[left:right], merging through b. Unlike
// mergeTopDown2, the scratch is indexed by the same absolute positions as a.
func mergeTopDown3(a []int, b []int, left, right int) {
	middle := left + ((right - left) / 2)
	if middle-left > 1 {
		mergeTopDown3(a, b, left, middle)
	}
	if right-middle > 1 {
		mergeTopDown3(a, b, middle, right)
	}
	l := left
	r := middle
	// Stable merge: ties prefer the left run.
	for z := left; z < right; z++ {
		if l < middle && (r == right || a[l] <= a[r]) {
			b[z] = a[l]
			l++
		} else {
			b[z] = a[r]
			r++
		}
	}
	for z := left; z < right; z++ {
		a[z] = b[z]
	}
}

// MergeSortTopDown3 performs in-place sort of int slice in ascending order.
func MergeSortTopDown3(a []int) {
	b := make([]int, len(a), len(a))
	mergeTopDown3(a, b, 0, len(a))
}
// MergeSortBottomUp1 performs in-place sort of int slice in ascending order.
// Iterative bottom-up merge sort: merges adjacent runs of width
// s = 1, 2, 4, ..., using b (indexed from 0) as scratch for each merge.
func MergeSortBottomUp1(a []int) {
	b := make([]int, len(a), len(a))
	s := 1
	for s < len(a) {
		for left, right := 0, s; left < len(a); left, right = left+s*2, right+s*2 {
			z := 0
			l := left
			// Clamp both run boundaries to the slice length.
			ls := l + s
			if ls > len(a) {
				ls = len(a)
			}
			r := right
			rs := r + s
			if rs > len(a) {
				rs = len(a)
			}
			// Stable merge of a[l:ls] and a[r:rs] into b[0:z].
			for l < ls || r < rs {
				if l < ls && (r >= rs || a[l] <= a[r]) {
					b[z] = a[l]
					l++
				} else {
					b[z] = a[r]
					r++
				}
				z++
			}
			for m := 0; m < z; m++ {
				a[left+m] = b[m]
			}
		}
		s *= 2
	}
}
// min returns the smaller of a and b.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}

// MergeSortBottomUp2 performs in-place sort of int slice in ascending order.
// Iterative bottom-up merge sort; scratch indices mirror the positions in a.
func MergeSortBottomUp2(a []int) {
	scratch := make([]int, len(a))
	for width := 1; width < len(a); width *= 2 {
		for lo := 0; lo < len(a); lo += width * 2 {
			l := lo
			r := lo + width
			lEnd := min(r, len(a))
			rEnd := min(r+width, len(a))
			// Stable merge of a[l:lEnd] and a[r:rEnd]; ties take the left
			// run first.
			for z := lo; z < rEnd; z++ {
				if l < lEnd && (r >= rEnd || a[l] <= a[r]) {
					scratch[z] = a[l]
					l++
				} else {
					scratch[z] = a[r]
					r++
				}
			}
			copy(a[lo:rEnd], scratch[lo:rEnd])
		}
	}
}
package gohorizon
import (
"encoding/json"
)
// NetworkLabelAssignmentSettings Specification for an individual network label assignment, stipulating the label and how many times it may be assigned to machines with this spec.
// All fields are optional pointers: nil means the property is unset and is
// omitted from the JSON encoding (see MarshalJSON).
type NetworkLabelAssignmentSettings struct {
	// Whether or not this specification is enabled. While this specification is disabled, automatic network label assigment for this desktop pool will skip over the network label in this spec.
	Enabled *bool `json:"enabled,omitempty"`
	// The maximum number of times this label can be assigned to a machine. Note this count only encompasses this spec. That is, this label may be used for other NICs and in other Desktop pools, but those assignments will not be counted towards this total. This count also does not include assignments of this label to machines not under the control of View.
	MaxLabel *int32 `json:"max_label,omitempty"`
	// This type specifies whether or not there is a maximum limit to the number of times this label may be assigned to machines within this spec. While this specification is enabled and unlimited, specs after this one in the NIC's network label specification list will never be used. * UNLIMITED: The network label assignment specification has no limit on the number of labels to assign. * LIMITED: The network label assignment specification has a limited number of labels to assign.
	MaxLabelType *string `json:"max_label_type,omitempty"`
	// The network label id for this spec. This network label must not have any incompatibility reasons that would preclude it from automatic machine assignment.
	NetworkLabelName *string `json:"network_label_name,omitempty"`
}
// NewNetworkLabelAssignmentSettings instantiates a new NetworkLabelAssignmentSettings object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func NewNetworkLabelAssignmentSettings() *NetworkLabelAssignmentSettings {
	this := NetworkLabelAssignmentSettings{}
	return &this
}

// NewNetworkLabelAssignmentSettingsWithDefaults instantiates a new NetworkLabelAssignmentSettings object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
// (No property currently has a default, so both constructors are equivalent.)
func NewNetworkLabelAssignmentSettingsWithDefaults() *NetworkLabelAssignmentSettings {
	this := NetworkLabelAssignmentSettings{}
	return &this
}
// Generated accessors: each Get* tolerates a nil receiver/field (returning
// the zero value), Get*Ok additionally reports whether the field was set,
// Has* tests presence, and Set* stores a copy of the given value.

// GetEnabled returns the Enabled field value if set, zero value otherwise.
func (o *NetworkLabelAssignmentSettings) GetEnabled() bool {
	if o == nil || o.Enabled == nil {
		var ret bool
		return ret
	}
	return *o.Enabled
}

// GetEnabledOk returns a tuple with the Enabled field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *NetworkLabelAssignmentSettings) GetEnabledOk() (*bool, bool) {
	if o == nil || o.Enabled == nil {
		return nil, false
	}
	return o.Enabled, true
}

// HasEnabled returns a boolean if a field has been set.
func (o *NetworkLabelAssignmentSettings) HasEnabled() bool {
	if o != nil && o.Enabled != nil {
		return true
	}

	return false
}

// SetEnabled gets a reference to the given bool and assigns it to the Enabled field.
func (o *NetworkLabelAssignmentSettings) SetEnabled(v bool) {
	o.Enabled = &v
}

// GetMaxLabel returns the MaxLabel field value if set, zero value otherwise.
func (o *NetworkLabelAssignmentSettings) GetMaxLabel() int32 {
	if o == nil || o.MaxLabel == nil {
		var ret int32
		return ret
	}
	return *o.MaxLabel
}

// GetMaxLabelOk returns a tuple with the MaxLabel field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *NetworkLabelAssignmentSettings) GetMaxLabelOk() (*int32, bool) {
	if o == nil || o.MaxLabel == nil {
		return nil, false
	}
	return o.MaxLabel, true
}

// HasMaxLabel returns a boolean if a field has been set.
func (o *NetworkLabelAssignmentSettings) HasMaxLabel() bool {
	if o != nil && o.MaxLabel != nil {
		return true
	}

	return false
}

// SetMaxLabel gets a reference to the given int32 and assigns it to the MaxLabel field.
func (o *NetworkLabelAssignmentSettings) SetMaxLabel(v int32) {
	o.MaxLabel = &v
}

// GetMaxLabelType returns the MaxLabelType field value if set, zero value otherwise.
func (o *NetworkLabelAssignmentSettings) GetMaxLabelType() string {
	if o == nil || o.MaxLabelType == nil {
		var ret string
		return ret
	}
	return *o.MaxLabelType
}

// GetMaxLabelTypeOk returns a tuple with the MaxLabelType field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *NetworkLabelAssignmentSettings) GetMaxLabelTypeOk() (*string, bool) {
	if o == nil || o.MaxLabelType == nil {
		return nil, false
	}
	return o.MaxLabelType, true
}

// HasMaxLabelType returns a boolean if a field has been set.
func (o *NetworkLabelAssignmentSettings) HasMaxLabelType() bool {
	if o != nil && o.MaxLabelType != nil {
		return true
	}

	return false
}

// SetMaxLabelType gets a reference to the given string and assigns it to the MaxLabelType field.
func (o *NetworkLabelAssignmentSettings) SetMaxLabelType(v string) {
	o.MaxLabelType = &v
}

// GetNetworkLabelName returns the NetworkLabelName field value if set, zero value otherwise.
func (o *NetworkLabelAssignmentSettings) GetNetworkLabelName() string {
	if o == nil || o.NetworkLabelName == nil {
		var ret string
		return ret
	}
	return *o.NetworkLabelName
}

// GetNetworkLabelNameOk returns a tuple with the NetworkLabelName field value if set, nil otherwise
// and a boolean to check if the value has been set.
func (o *NetworkLabelAssignmentSettings) GetNetworkLabelNameOk() (*string, bool) {
	if o == nil || o.NetworkLabelName == nil {
		return nil, false
	}
	return o.NetworkLabelName, true
}

// HasNetworkLabelName returns a boolean if a field has been set.
func (o *NetworkLabelAssignmentSettings) HasNetworkLabelName() bool {
	if o != nil && o.NetworkLabelName != nil {
		return true
	}

	return false
}

// SetNetworkLabelName gets a reference to the given string and assigns it to the NetworkLabelName field.
func (o *NetworkLabelAssignmentSettings) SetNetworkLabelName(v string) {
	o.NetworkLabelName = &v
}
// MarshalJSON serializes only the fields that have been set, implementing
// the omitempty semantics for the optional pointer fields explicitly.
func (o NetworkLabelAssignmentSettings) MarshalJSON() ([]byte, error) {
	toSerialize := map[string]interface{}{}
	if o.Enabled != nil {
		toSerialize["enabled"] = o.Enabled
	}
	if o.MaxLabel != nil {
		toSerialize["max_label"] = o.MaxLabel
	}
	if o.MaxLabelType != nil {
		toSerialize["max_label_type"] = o.MaxLabelType
	}
	if o.NetworkLabelName != nil {
		toSerialize["network_label_name"] = o.NetworkLabelName
	}
	return json.Marshal(toSerialize)
}
// NullableNetworkLabelAssignmentSettings distinguishes "unset" from
// "explicitly null": isSet tracks whether a value (possibly nil) was assigned.
type NullableNetworkLabelAssignmentSettings struct {
	value *NetworkLabelAssignmentSettings
	isSet bool
}

// Get returns the wrapped value (nil when unset or explicitly null).
func (v NullableNetworkLabelAssignmentSettings) Get() *NetworkLabelAssignmentSettings {
	return v.value
}

// Set stores val and marks the wrapper as set.
func (v *NullableNetworkLabelAssignmentSettings) Set(val *NetworkLabelAssignmentSettings) {
	v.value = val
	v.isSet = true
}

// IsSet reports whether a value has been assigned (even a nil one).
func (v NullableNetworkLabelAssignmentSettings) IsSet() bool {
	return v.isSet
}

// Unset clears the value and marks the wrapper as unset.
func (v *NullableNetworkLabelAssignmentSettings) Unset() {
	v.value = nil
	v.isSet = false
}

// NewNullableNetworkLabelAssignmentSettings wraps val in a set wrapper.
func NewNullableNetworkLabelAssignmentSettings(val *NetworkLabelAssignmentSettings) *NullableNetworkLabelAssignmentSettings {
	return &NullableNetworkLabelAssignmentSettings{value: val, isSet: true}
}

// MarshalJSON encodes the wrapped value; an unset or null wrapper encodes as JSON null.
func (v NullableNetworkLabelAssignmentSettings) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value)
}
func (v *NullableNetworkLabelAssignmentSettings) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
} | model_network_label_assignment_settings.go | 0.806434 | 0.529689 | model_network_label_assignment_settings.go | starcoder |
Left/Right Pan and Volume Module
Takes a single audio buffer stream as input and outputs left and right channels.
*/
//-----------------------------------------------------------------------------
package mix
import (
"github.com/deadsy/babi/core"
"github.com/deadsy/babi/utils/log"
)
//-----------------------------------------------------------------------------
// panMixInfo declares the module's ports: one audio input, a MIDI input and
// two float controls in; left and right audio channels out.
var panMixInfo = core.ModuleInfo{
	Name: "panMix",
	In: []core.PortInfo{
		{"in", "input", core.PortTypeAudio, nil},
		{"midi", "midi input", core.PortTypeMIDI, panMixMidiIn},
		{"vol", "volume (0..1)", core.PortTypeFloat, panMixVolume},
		{"pan", "left/right pan (0..1)", core.PortTypeFloat, panMixPan},
	},
	Out: []core.PortInfo{
		{"out0", "left channel output", core.PortTypeAudio, nil},
		{"out1", "right channel output", core.PortTypeAudio, nil},
	},
}

// Info returns the module information.
func (m *panMix) Info() *core.ModuleInfo {
	return &m.info
}

//-----------------------------------------------------------------------------

// panMix holds the state of the left/right pan and volume module.
type panMix struct {
	info  core.ModuleInfo // module info
	ch    uint8           // MIDI channel
	ccPan uint8           // MIDI CC number for pan control
	ccVol uint8           // MIDI CC number for volume control
	vol   float32         // overall volume
	pan   float32         // pan value 0 == left, 1 == right
	volL  float32         // left channel volume
	volR  float32         // right channel volume
}

// NewPan returns a left/right pan and volume module. ch is the MIDI channel;
// cc is the CC number used for pan, and cc+1 is used for volume.
func NewPan(s *core.Synth, ch, cc uint8) core.Module {
	// NOTE(review): the empty Printf presumably relies on the logger adding
	// caller information as an entry trace — confirm before changing it.
	log.Info.Printf("")
	m := &panMix{
		info:  panMixInfo,
		ch:    ch,
		ccPan: cc,
		ccVol: cc + 1,
	}
	return s.Register(m)
}

// Child returns the child modules (none for this module).
func (m *panMix) Child() []core.Module {
	return nil
}

// Stop performs any cleanup of a module (nothing to do here).
func (m *panMix) Stop() {
}
//-----------------------------------------------------------------------------
// Port Events
// set recomputes the per-channel gains from the current volume and pan.
// Using sin/cos weighting keeps l*l + r*r constant (equal-power panning).
func (m *panMix) set() {
	angle := m.pan
	m.volR = m.vol * core.Sin(angle)
	m.volL = m.vol * core.Cos(angle)
}
// setVol sets the overall volume from a 0..1 control value.
func (m *panMix) setVol(vol float32) {
	log.Info.Printf("set volume %f", vol)
	// convert to a linear volume: 2^v - 1 maps 0..1 onto 0..1
	m.vol = core.Pow2(vol) - 1.0
	m.set()
}

// setPan sets the pan position; 0..1 is mapped onto 0..Pi/2 radians for the
// sin/cos equal-power law used in set().
func (m *panMix) setPan(pan float32) {
	log.Info.Printf("set pan %f", pan)
	m.pan = pan * (core.Pi / 2.0)
	m.set()
}

// panMixVolume handles float events on the "vol" port, clamping to [0,1].
func panMixVolume(cm core.Module, e *core.Event) {
	m := cm.(*panMix)
	m.setVol(core.Clamp(e.GetEventFloat().Val, 0, 1))
}

// panMixPan handles float events on the "pan" port, clamping to [0,1].
func panMixPan(cm core.Module, e *core.Event) {
	m := cm.(*panMix)
	m.setPan(core.Clamp(e.GetEventFloat().Val, 0, 1))
}
// panMixMidiIn handles MIDI events on this module's channel, reacting to
// control changes on the configured volume and pan CC numbers.
func panMixMidiIn(cm core.Module, e *core.Event) {
	m := cm.(*panMix)
	me := e.GetEventMIDIChannel(m.ch)
	if me == nil || me.GetType() != core.EventMIDIControlChange {
		return
	}
	switch me.GetCcNum() {
	case m.ccVol:
		m.setVol(me.GetCcFloat())
	case m.ccPan:
		m.setPan(me.GetCcFloat())
	default:
		// ignore other controllers
	}
}
//-----------------------------------------------------------------------------
// Process runs the module DSP: the single input buffer is scaled by the left
// and right gains into the two output buffers.
func (m *panMix) Process(buf ...*core.Buf) bool {
	in, left, right := buf[0], buf[1], buf[2]
	left.Copy(in)
	left.MulScalar(m.volL)
	right.Copy(in)
	right.MulScalar(m.volR)
	return true
}
//-----------------------------------------------------------------------------
package sdf
// Connector3d stores the information needed to connect to another part:
// a position, an orientation vector and an angle about that vector.
type Connector3d struct {
	Position V3
	Vector   V3
	Angle    float64
}

// Transform3DConnector applies a transformation matrix to an SDF3 and a connector.
// (kept commented-out for reference; currently unused)
// func Transform3DConnector(sdf SDF3, connectors map[string]Connector3d, matrix M44) (SDF3, map[string]Connector3d) {
// 	s := TransformSDF3{}
// 	s.sdf = sdf
// 	s.matrix = matrix
// 	s.inverse = matrix.Inverse()
// 	s.bb = matrix.MulBox(sdf.BoundingBox())
// 	for key := range connectors {
// 		connector := connectors[key]
// 		connector.Position = matrix.MulPosition(connectors[key].Position)
// 		connectors[key] = connector
// 	}
// 	return &s, connectors
// }

// ConnectorizedSDF3 is an SDF3 that can store connectors.
type ConnectorizedSDF3 interface {
	SDF3
	Connectors() map[string]Connector3d
	AddConnector(name string, connector Connector3d)
	Connect(parentConnector string, child ConnectorizedSDF3, childConnector string) ConnectorizedSDF3
}

// SDF3WithConnectors is an SDF3 carrying a named set of connectors.
type SDF3WithConnectors struct {
	SDF3
	connectors map[string]Connector3d
}

// Connectors returns all of the connectors.
// NOTE(review): unlike the Union variant below, this accessor does not lazily
// allocate, so it can return a nil map — callers should only read from it.
func (s *SDF3WithConnectors) Connectors() map[string]Connector3d {
	return s.connectors
}

// AddConnector adds a Connector3d to an SDF3, allocating the map on first use.
func (s *SDF3WithConnectors) AddConnector(name string, connector Connector3d) {
	if s.connectors == nil {
		s.connectors = make(map[string]Connector3d)
	}
	s.connectors[name] = connector
}
// Connect moves a child SDF so the specified connectors on the parent and
// child align, unions them and returns the union. Only connector positions
// are aligned; Vector and Angle are not used here.
func (s *SDF3WithConnectors) Connect(parentConnector string, child ConnectorizedSDF3, childConnector string) ConnectorizedSDF3 {
	possDiff := s.connectors[parentConnector].Position.Sub(child.Connectors()[childConnector].Position)
	transformedChild := Transform3D(child, Translate3d(possDiff))
	s2 := UnionConnectorizedSDF3{}
	s2.sdf = []SDF3{s, transformedChild}
	// work out the bounding box
	s2.bb = s.BoundingBox().Extend(transformedChild.BoundingBox())
	s2.min = Min
	// NOTE(review): the union shares the parent's connector map; mutations on
	// one are visible on the other — confirm this aliasing is intended.
	s2.connectors = s.Connectors()
	return &s2
}

// UnionConnectorizedSDF3 is a union of SDF3s that also carries connectors.
type UnionConnectorizedSDF3 struct {
	sdf        []SDF3
	connectors map[string]Connector3d
	min        MinFunc
	bb         Box3
}
// Evaluate returns the minimum distance from p over all members of the union,
// combining member distances with the configured MinFunc (allows blending).
func (s *UnionConnectorizedSDF3) Evaluate(p V3) float64 {
	var d float64
	for i, child := range s.sdf {
		cd := child.Evaluate(p)
		if i == 0 {
			d = cd
		} else {
			d = s.min(d, cd)
		}
	}
	return d
}
// BoundingBox returns the bounding box of an SDF3 union.
func (s *UnionConnectorizedSDF3) BoundingBox() Box3 {
	return s.bb
}

// SetMin is used to control blending between the union's members.
func (s *UnionConnectorizedSDF3) SetMin(min MinFunc) {
	s.min = min
}
// AddConnector adds a Connector3d to the union, allocating the connector map
// on first use. Fix: the zero value of UnionConnectorizedSDF3 has a nil map
// and writing to a nil map panics; the lazy allocation mirrors
// SDF3WithConnectors.AddConnector and this type's own Connectors method.
func (s *UnionConnectorizedSDF3) AddConnector(name string, connector Connector3d) {
	if s.connectors == nil {
		s.connectors = make(map[string]Connector3d)
	}
	s.connectors[name] = connector
}
// Connect aligns the named child connector with the named parent connector,
// translates the child accordingly, and returns a new union containing the
// parent's members plus the child.
func (s *UnionConnectorizedSDF3) Connect(parentConnector string, child ConnectorizedSDF3, childConnector string) ConnectorizedSDF3 {
	possDiff := s.connectors[parentConnector].Position.Sub(child.Connectors()[childConnector].Position)
	transformedChild := Transform3D(child, Translate3d(possDiff))
	s2 := UnionConnectorizedSDF3{}
	// Copy the member list rather than appending to s.sdf directly: append on
	// the shared slice may reuse s's backing array, so two Connect calls on
	// the same parent could silently clobber each other's appended child.
	s2.sdf = make([]SDF3, 0, len(s.sdf)+1)
	s2.sdf = append(s2.sdf, s.sdf...)
	s2.sdf = append(s2.sdf, transformedChild)
	// work out the bounding box
	s2.bb = s.BoundingBox().Extend(transformedChild.BoundingBox())
	s2.min = Min
	s2.connectors = s.Connectors()
	return &s2
}
// Connectors returns the map of Connector3ds associated with the SDF
func (s *UnionConnectorizedSDF3) Connectors() map[string]Connector3d {
if s.connectors == nil {
s.connectors = make(map[string]Connector3d)
}
return s.connectors
} | sdf/connectors.go | 0.646906 | 0.442637 | connectors.go | starcoder |
package gonighttime
import (
"math"
"time"
"github.com/kelvins/sunrisesunset"
)
// Place is a geographic location in degrees at a particular moment in time.
type Place struct {
	Lat  float64
	Lon  float64
	Time time.Time
}

// Route is a flight from a departure place/time to an arrival place/time.
type Route struct {
	Departure Place
	Arrival   Place
}

// deg2rad converts degrees to radians.
func deg2rad(degrees float64) float64 {
	return degrees * math.Pi / 180
}

// hsin is the haversine function: sin^2(theta/2).
func hsin(theta float64) float64 {
	return math.Pow(math.Sin(theta/2), 2)
}

// Midpoint calculates the geographic middle point between two coordinates.
func Midpoint(start Place, end Place) Place {
	lat1, lon1 := deg2rad(start.Lat), deg2rad(start.Lon)
	lat2, lon2 := deg2rad(end.Lat), deg2rad(end.Lon)
	dlon := lon2 - lon1
	bx := math.Cos(lat2) * math.Cos(dlon)
	by := math.Cos(lat2) * math.Sin(dlon)
	midLat := math.Atan2(math.Sin(lat1)+math.Sin(lat2),
		math.Sqrt((math.Cos(lat1)+bx)*(math.Cos(lat1)+bx)+by*by))
	midLon := lon1 + math.Atan2(by, math.Cos(lat1)+bx)
	return Place{
		Lat: (midLat * 180) / math.Pi,
		Lon: (midLon * 180) / math.Pi,
	}
}

// distance returns the great-circle distance between two points, in nautical
// miles, using the haversine formula on a sphere of radius 6378100 m.
func distance(lat1, lon1, lat2, lon2 float64) float64 {
	lat1, lon1 = deg2rad(lat1), deg2rad(lon1)
	lat2, lon2 = deg2rad(lat2), deg2rad(lon2)
	r := 6378100.0
	h := hsin(lat2-lat1) + math.Cos(lat1)*math.Cos(lat2)*hsin(lon2-lon1)
	return 2 * r * math.Asin(math.Sqrt(h)) / 1000 / 1.852 // nautical miles
}

// Distance returns the route's great-circle distance in nautical miles.
func (route *Route) Distance() float64 {
	return distance(route.Departure.Lat, route.Departure.Lon, route.Arrival.Lat, route.Arrival.Lon)
}

// FlightTime returns the total flight time.
func (route *Route) FlightTime() time.Duration {
	return route.Arrival.Time.Sub(route.Departure.Time)
}

// Speed returns the average ground speed in knots.
func (route *Route) Speed() float64 {
	return route.Distance() / route.FlightTime().Hours()
}
// SunriseSunset returns apparent sunrise and sunset times for the place's
// date and coordinates.
// NOTE(review): the error from GetSunriseSunset is silently dropped — confirm
// callers tolerate whatever values are returned on failure.
func (place *Place) SunriseSunset() (time.Time, time.Time) {
	params := sunrisesunset.Parameters{
		Latitude:  place.Lat,
		Longitude: place.Lon,
		Date:      place.Time,
	}
	sunrise, sunset, _ := params.GetSunriseSunset()
	return sunrise, sunset
}

// Sunrise returns aviation sunrise time (30 minutes before apparent sunrise).
func (place *Place) Sunrise() time.Time {
	s, _ := place.SunriseSunset()
	return s.Add(time.Duration(-30) * time.Minute)
}

// Sunset returns aviation sunset time (30 minutes after apparent sunset).
func (place *Place) Sunset() time.Time {
	_, s := place.SunriseSunset()
	return s.Add(time.Duration(30) * time.Minute)
}
// MeetWithSun bisects along the route to find the point where the airplane
// meets the given solar event: "sunrise", or sunset for any other target
// value. The search assumes constant ground speed and stops once the local
// event time is within maxDiffMinutes of the aircraft's passing time, or
// after maxIterations rounds.
func (route *Route) MeetWithSun(target string) Place {
	maxIterations := 20   // max iterations, in case of some error, just not to iterate infinitely
	maxDiffMinutes := 1.0 // tolerance in minutes, where we agree we got the sunset/sunrise
	iter := 0
	var xPoint Place
	diff := time.Duration(0)
	startPoint := route.Departure
	endPoint := route.Arrival
	speed := route.Speed()
	for iter < maxIterations {
		iter++
		// candidate point: geographic midpoint of the current bracket
		xPoint = Midpoint(startPoint, endPoint)
		distance := distance(route.Departure.Lat, route.Departure.Lon, xPoint.Lat, xPoint.Lon)
		// time (minutes) to reach the candidate at constant speed
		flightTime := distance / speed * 60
		xPoint.Time = route.Departure.Time.Add(time.Duration(flightTime) * time.Minute)
		if target == "sunrise" {
			diff = xPoint.Time.Sub(xPoint.Sunrise())
		} else {
			diff = xPoint.Time.Sub(xPoint.Sunset())
		}
		if math.Abs(diff.Minutes()) > maxDiffMinutes {
			// narrow the bracket towards the crossing
			if diff.Minutes() > 0 {
				endPoint = xPoint
			} else {
				startPoint = xPoint
			}
		} else {
			break
		}
	}
	return xPoint
}
// NightTime returns a calculated night time
func (route *Route) NightTime() time.Duration {
nightTime := time.Duration(0)
if (route.Departure.Time.After(route.Departure.Sunrise()) && route.Departure.Time.Before(route.Departure.Sunset())) &&
(route.Arrival.Time.After(route.Arrival.Sunrise()) && route.Arrival.Time.Before(route.Arrival.Sunset())) {
// full day flight
nightTime = time.Duration(0)
} else if route.Departure.Time.After(route.Departure.Sunrise()) && route.Departure.Time.Before(route.Departure.Sunset()) {
// flight from day to night, night landing
point := route.MeetWithSun("sunset")
nightTime = route.Arrival.Time.Sub(point.Time)
} else if route.Arrival.Time.After(route.Arrival.Sunrise()) && route.Arrival.Time.Before(route.Arrival.Sunset()) {
// flight from night to day, day landing
point := route.MeetWithSun("sunrise")
nightTime = point.Time.Sub(route.Departure.Time)
} else {
// full night time
nightTime = route.FlightTime()
}
return nightTime
} | nighttime.go | 0.861974 | 0.606324 | nighttime.go | starcoder |
package spec
import (
"testing"
"time"
"github.com/256dpi/gomqtt/client"
"github.com/256dpi/gomqtt/packet"
"github.com/stretchr/testify/assert"
)
// AuthenticationTest tests the broker for valid and invalid authentication:
// a connection to DenyURL must be refused with NotAuthorized, while a
// connection to URL must be accepted.
func AuthenticationTest(t *testing.T, config *Config) {
	deniedClient := client.New()
	deniedClient.Callback = func(msg *packet.Message, err error) error {
		// the broker is expected to drop the denied connection
		assert.Equal(t, client.ErrClientConnectionDenied, err)
		return nil
	}

	cf, err := deniedClient.Connect(client.NewConfig(config.DenyURL))
	assert.NoError(t, err)
	assert.Error(t, cf.Wait(10*time.Second))
	assert.Equal(t, packet.NotAuthorized, cf.ReturnCode())
	assert.False(t, cf.SessionPresent())

	allowedClient := client.New()

	cf, err = allowedClient.Connect(client.NewConfig(config.URL))
	assert.NoError(t, err)
	assert.NoError(t, cf.Wait(10*time.Second))
	assert.Equal(t, packet.ConnectionAccepted, cf.ReturnCode())
	assert.False(t, cf.SessionPresent())

	err = allowedClient.Disconnect()
	assert.NoError(t, err)
}
// UniqueClientIDUncleanTest tests that the broker enforces unique client ids
// for persistent (non-clean) sessions: a second connection with the same id
// must be accepted with the session present, and the first client's callback
// must fire (presumably the broker disconnects it in favor of the second).
func UniqueClientIDUncleanTest(t *testing.T, config *Config) {
	id := config.clientID()

	options := client.NewConfigWithClientID(config.URL, id)
	options.CleanSession = false

	// start from a clean slate so SessionPresent is deterministic
	assert.NoError(t, client.ClearSession(options, 10*time.Second))

	wait := make(chan struct{})

	firstClient := client.New()
	firstClient.Callback = func(msg *packet.Message, err error) error {
		close(wait)
		return nil
	}

	cf, err := firstClient.Connect(options)
	assert.NoError(t, err)
	assert.NoError(t, cf.Wait(10*time.Second))
	assert.Equal(t, packet.ConnectionAccepted, cf.ReturnCode())
	assert.False(t, cf.SessionPresent())

	secondClient := client.New()

	cf, err = secondClient.Connect(options)
	assert.NoError(t, err)
	assert.NoError(t, cf.Wait(10*time.Second))
	assert.Equal(t, packet.ConnectionAccepted, cf.ReturnCode())
	// the persistent session created by the first client must be resumed
	assert.True(t, cf.SessionPresent())

	safeReceive(wait)

	err = secondClient.Disconnect()
	assert.NoError(t, err)
}
// UniqueClientIDCleanTest tests that the broker enforces unique client ids
// for clean sessions: a second connection with the same id must be accepted
// with no session present, and the first client's callback must fire
// (presumably the broker disconnects it in favor of the second).
func UniqueClientIDCleanTest(t *testing.T, config *Config) {
	id := config.clientID()

	options := client.NewConfigWithClientID(config.URL, id)
	options.CleanSession = true

	wait := make(chan struct{})

	firstClient := client.New()
	firstClient.Callback = func(msg *packet.Message, err error) error {
		close(wait)
		return nil
	}

	cf, err := firstClient.Connect(options)
	assert.NoError(t, err)
	assert.NoError(t, cf.Wait(10*time.Second))
	assert.Equal(t, packet.ConnectionAccepted, cf.ReturnCode())
	assert.False(t, cf.SessionPresent())

	secondClient := client.New()

	cf, err = secondClient.Connect(options)
	assert.NoError(t, err)
	assert.NoError(t, cf.Wait(10*time.Second))
	assert.Equal(t, packet.ConnectionAccepted, cf.ReturnCode())
	// clean sessions never resume state
	assert.False(t, cf.SessionPresent())

	safeReceive(wait)

	err = secondClient.Disconnect()
	assert.NoError(t, err)
}
// RootSlashDistinctionTest tests the broker for supporting the root slash
// distinction.
func RootSlashDistinctionTest(t *testing.T, config *Config, topic string) {
c := client.New()
wait := make(chan struct{})
c.Callback = func(msg *packet.Message, err error) error {
assert.NoError(t, err)
assert.Equal(t, topic, msg.Topic)
assert.Equal(t, testPayload, msg.Payload)
assert.Equal(t, packet.QOS(0), msg.QOS)
assert.False(t, msg.Retain)
close(wait)
return nil
}
cf, err := c.Connect(client.NewConfig(config.URL))
assert.NoError(t, err)
assert.NoError(t, cf.Wait(10*time.Second))
assert.Equal(t, packet.ConnectionAccepted, cf.ReturnCode())
assert.False(t, cf.SessionPresent())
sf, err := c.Subscribe("/"+topic, 0)
assert.NoError(t, err)
assert.NoError(t, sf.Wait(10*time.Second))
assert.Equal(t, []packet.QOS{0}, sf.ReturnCodes())
sf, err = c.Subscribe(topic, 0)
assert.NoError(t, err)
assert.NoError(t, sf.Wait(10*time.Second))
assert.Equal(t, []packet.QOS{0}, sf.ReturnCodes())
pf, err := c.Publish(topic, testPayload, 0, false)
assert.NoError(t, err)
assert.NoError(t, pf.Wait(10*time.Second))
safeReceive(wait)
time.Sleep(config.NoMessageWait)
err = c.Disconnect()
assert.NoError(t, err)
} | spec/advanced.go | 0.672547 | 0.458773 | advanced.go | starcoder |
package plots
import (
"math"
"reflect"
"sort"
)
// Series holds paired x/y samples for use in a 2D graph. It implements the
// plotinum XYer interface.
type Series struct {
	xs, ys []float64
}

// MakeSeries builds a Series from parallel x and y slices.
func MakeSeries(xs, ys []float64) Series {
	return Series{xs, ys}
}

// X returns the i'th x value.
func (s *Series) X(i int) float64 {
	return s.xs[i]
}

// Y returns the i'th y value.
func (s *Series) Y(i int) float64 {
	return s.ys[i]
}

// Len reports the number of points in the series.
func (s *Series) Len() int {
	return len(s.xs)
}

// Pairs returns the series as a slice of {x, y} pairs.
func (s *Series) Pairs() [][]float64 {
	pairs := make([][]float64, s.Len())
	for i := range pairs {
		pairs[i] = []float64{s.X(i), s.Y(i)}
	}
	return pairs
}
// ExtractSeries extracts (x, y) points from dataSet, where the names of x and
// y are given by varNames[0] and varNames[1]. varNames[2] ("z") optionally
// names a field used to split the data into multiple series; one Series per
// distinct z value is returned, in ascending z order, together with the z
// values themselves. constraints maps field names to required values; points
// not matching every constraint are skipped, as are entries whose errs[i] is
// non-nil. If the name of x (resp. y) is empty, XFunc (resp. YFunc) computes
// the value instead. NaN y values are replaced with 0. If addZeros is true a
// (0, 0) point is added to each series.
// Fix: simplified the non-idiomatic `for x, _ := range` form (gofmt -s).
func ExtractSeries(dataSet []interface{}, errs []error, varNames []string, constraints map[string]float64, XFunc, YFunc func(interface{}) float64, addZeros bool) ([]Series, []float64) {
	if len(varNames) < 2 {
		panic("not enough variable names for ExtractSeries")
	} else if len(varNames) == 2 {
		// no z variable: a single series, no constraint filtering
		return extractXY(dataSet, varNames[0], varNames[1]), nil
	}
	// iterate through dataSet to create a map x->y for each z
	maps := make(map[float64]map[float64]float64)
	zs := make([]float64, 0)
	for i, data := range dataSet {
		if errs[i] != nil {
			continue
		}
		val := reflect.ValueOf(data)
		// check that constraints hold for this point
		ok := true
		for k, v := range constraints {
			c := val.FieldByName(k).Float()
			if v != c {
				ok = false
				break
			}
		}
		if !ok {
			continue
		}
		// constraints fit; keep point
		var x, y float64
		if varNames[0] != "" {
			x = val.FieldByName(varNames[0]).Float()
		} else {
			x = XFunc(data)
		}
		if varNames[1] != "" {
			yval := val.FieldByName(varNames[1])
			if yval.IsValid() {
				y = yval.Float()
			} else {
				y = 0.0
			}
		} else {
			y = YFunc(data)
		}
		if math.IsNaN(y) {
			// replace with default values
			// might want to be smarter about this
			y = 0.0
		}
		z := val.FieldByName(varNames[2]).Float()
		zmap, ok := maps[z]
		if !ok {
			zs = append(zs, z)
			maps[z] = make(map[float64]float64)
			zmap = maps[z]
		}
		zmap[x] = y
	}
	// create the slice of Series in ascending-z order
	sort.Float64s(zs)
	ret := make([]Series, len(zs))
	for i, z := range zs {
		zmap := maps[z]
		// x should be in ascending order within the series
		xs := make([]float64, 0)
		if addZeros {
			xs = append(xs, 0.0)
		}
		for x := range zmap {
			xs = append(xs, x)
		}
		sort.Float64s(xs)
		ys := make([]float64, len(xs))
		if addZeros {
			// NOTE(review): with negative x values the injected 0 may not
			// sort to index 0, pairing y=0 with the wrong x — confirm inputs
			// are non-negative.
			ys[0] = 0.0
		}
		for j, x := range xs {
			if addZeros && j == 0 {
				continue
			}
			ys[j] = zmap[x]
		}
		ret[i] = Series{xs, ys}
	}
	return ret, zs
}
func extractXY(dataSet []interface{}, varX, varY string) []Series {
xs, ymap := make([]float64, 0), make(map[float64]float64)
for _, data := range dataSet {
val := reflect.ValueOf(data)
x := val.FieldByName(varX).Float()
y := val.FieldByName(varY).Float()
xs = append(xs, x)
ymap[x] = y
}
sort.Float64s(xs)
ys := make([]float64, len(xs))
for i, x := range xs {
ys[i] = ymap[x]
}
return []Series{Series{xs, ys}}
} | plots/series.go | 0.745306 | 0.558929 | series.go | starcoder |
package utils
import (
"strings"
"time"
)
// p binds a date-format placeholder to its Go reference-time equivalent and
// to a regular expression matching the formatted value.
type p struct {
	find   string
	format string
	reg    string
}

/*
Formats:
 M    - month (1)
 MM   - month (01)
 MMM  - month (Jan)
 MMMM - month (January)
 D    - day (2)
 DD   - day (02)
 DDD  - day (Mon)
 DDDD - day (Monday)
 T    - Time (T, 2006-01-02T15:04:05)
 YY   - year (06)
 YYYY - year (2006)
 hh   - hours (15)
 mm   - minutes (04)
 ss   - seconds (05)

AM/PM hours: 'h' followed by optional 'mm' and 'ss' followed by 'pm', e.g.
 hpm       - hours (03PM)
 h:mmpm    - hours:minutes (03:04PM)
 h:mm:sspm - hours:minutes:seconds (03:04:05PM)

Time zones: a time format followed by 'ZZZZ', 'ZZZ' or 'ZZ', e.g.
 hh:mm:ss ZZZZ (16:05:06 +0100)
 hh:mm:ss ZZZ  (16:05:06 CET)
 hh:mm:ss ZZ   (16:05:06 +01:00)
*/

// Placeholder lists all supported placeholders. The order matters: longer
// tokens are substituted before their shorter prefixes (e.g. "hh" before "h").
var Placeholder = []p{
	{find: "hh", format: "15", reg: "\\d{2}"},
	{find: "h", format: "03", reg: "\\d{2}"},
	{find: "mm", format: "04", reg: "\\d{2}"},
	{find: "ss", format: "05", reg: "\\d{2}"},
	{find: "SSS", format: "999", reg: "\\d{3}"},
	{find: "MMMM", format: "January", reg: "\\w{3,9}"},
	{find: "MMM", format: "Jan", reg: "\\w{3}"},
	{find: "MM", format: "01", reg: "\\d{2}"},
	{find: "M", format: "1", reg: "\\d{1}"},
	{find: "T", format: "T", reg: "\\w{1}"},
	{find: "pm", format: "PM", reg: "\\w{2}"},
	{find: "ZZZZ", format: "-0700", reg: "(-|\\+)\\d{4}"},
	{find: "ZZZ", format: "MST", reg: "\\w{3}"},
	{find: "ZZ", format: "Z07:00", reg: "\\w{1}"},
	{find: "YYYY", format: "2006", reg: "\\d{4}"},
	{find: "YY", format: "06", reg: "\\d{2}"},
	{find: "DDDD", format: "Monday", reg: "\\w{6,9}"},
	{find: "DDD", format: "Mon", reg: "\\w{3}"},
	{find: "DD", format: "02", reg: "\\d{2}"},
	{find: "D", format: "2", reg: "\\d{1}"},
}

var (
	DefaultTimeFormat     = "hh:mm:ss"
	DefaultDateFormat     = "YYYY-MM-DD"
	DefaultDateTimeFormat = "YYYY-MM-DDThh:mm:ss"
)

// replace rewrites all placeholders in `in` with their Go reference-time
// equivalents.
func replace(in string) string {
	result := in
	for _, ph := range Placeholder {
		result = strings.Replace(result, ph.find, ph.format, -1)
	}
	return result
}

// GetReg rewrites all placeholders in `in` with regular expressions matching
// the corresponding formatted values.
func GetReg(in string) string {
	result := in
	for _, ph := range Placeholder {
		result = strings.Replace(result, ph.find, ph.reg, -1)
	}
	return result
}
// Format formats a date based on Microsoft Excel (TM) conventions, by
// translating the placeholder layout to Go's reference time via replace.
func Format(format string, date time.Time) string {
	return date.Format(replace(format))
}
// Parse parses a value to a date based on Microsoft Excel (TM) formats
func Parse(format string, value string) (time.Time, error) {
return time.ParseInLocation(replace(format), value, time.Local)
} | agent/core/utils/fmt_date.go | 0.513668 | 0.401072 | fmt_date.go | starcoder |
package common
import (
"math"
)
// ShortestDistancesFromSource returns shortest distances from the given
// source node to all vertices in the graph (unreached nodes keep +Inf).
func (graph *Graph) ShortestDistancesFromSource(src *Node) map[int]float64 {
	result := graph.ShortestPath(src, ShortestPathParams{})
	return result.Distances
}

// ShortestPathParams configures a ShortestPath search.
type ShortestPathParams struct {
	// maximum distance to travel from src (0 means unlimited)
	MaxDistance float64
	// terminate search once we reach any of these nodes
	StopNodes []*Node
	// override edge lengths, keyed by edge ID
	EdgeLengths map[int]float64
}
// IsStopNode reports whether node is one of the configured stop nodes.
func (params ShortestPathParams) IsStopNode(node *Node) bool {
	for i := range params.StopNodes {
		if params.StopNodes[i] == node {
			return true
		}
	}
	return false
}
// ShortestPathResult holds the outcome of a shortest-path search.
type ShortestPathResult struct {
	source *Node
	graph  *Graph
	// Distances maps node ID to the shortest known distance from the source
	// (+Inf if never reached).
	Distances map[int]float64
	// Remaining marks node IDs that were never finalized by the search.
	Remaining map[int]bool
	// Backpointers maps node ID to its predecessor's ID on the shortest path.
	Backpointers map[int]int
}
// GetPathTo returns the node sequence from (but excluding) the source to the
// given node, or nil if the node was never reached by the search.
func (result ShortestPathResult) GetPathTo(node *Node) []*Node {
	if result.Remaining[node.ID] {
		return nil
	}
	if _, ok := result.Backpointers[node.ID]; !ok {
		return nil
	}
	// walk the backpointers from node towards the source
	var reversed []*Node
	for cur := node; cur.ID != result.source.ID; cur = result.graph.Nodes[result.Backpointers[cur.ID]] {
		reversed = append(reversed, cur)
	}
	// flip into source-to-node order
	path := make([]*Node, len(reversed))
	for i, n := range reversed {
		path[len(path)-1-i] = n
	}
	return path
}
// GetFullPathTo returns the path to node, including the source node itself.
func (result ShortestPathResult) GetFullPathTo(node *Node) []*Node {
	full := []*Node{result.source}
	return append(full, result.GetPathTo(node)...)
}
// ShortestPath runs Dijkstra's algorithm from src, honoring the options in
// params. The scan for the closest unfinalized node is linear, so the whole
// search is O(V^2 + E).
// NOTE(review): a priority queue would speed this up for large graphs.
func (graph *Graph) ShortestPath(src *Node, params ShortestPathParams) ShortestPathResult {
	// use Dijkstra's algorithm
	distances := make(map[int]float64)
	remaining := make(map[int]bool)
	backpointers := make(map[int]int)
	for _, node := range graph.Nodes {
		distances[node.ID] = math.Inf(1)
		remaining[node.ID] = true
	}
	distances[src.ID] = 0
	backpointers[src.ID] = src.ID
	for len(remaining) > 0 {
		// pick the unfinalized node with the smallest known distance
		var closestNode *Node
		var closestDistance float64
		for nodeID := range remaining {
			if !math.IsInf(distances[nodeID], 1) && (closestNode == nil || distances[nodeID] < closestDistance) {
				closestNode = graph.Nodes[nodeID]
				closestDistance = distances[nodeID]
			}
		}
		if closestNode == nil {
			// every remaining node is unreachable
			break
		}
		delete(remaining, closestNode.ID)
		if (params.MaxDistance != 0 && closestDistance > params.MaxDistance) || params.IsStopNode(closestNode) {
			break
		}
		// relax the outgoing edges
		for _, edge := range closestNode.Out {
			var edgeLength float64
			if l, ok := params.EdgeLengths[edge.ID]; ok {
				// caller-supplied override for this edge
				edgeLength = l
			} else {
				edgeLength = edge.Segment().Length()
			}
			d := closestDistance + edgeLength
			if remaining[edge.Dst.ID] && d < distances[edge.Dst.ID] {
				distances[edge.Dst.ID] = d
				backpointers[edge.Dst.ID] = closestNode.ID
			}
		}
	}
	return ShortestPathResult{
		source:       src,
		graph:        graph,
		Distances:    distances,
		Remaining:    remaining,
		Backpointers: backpointers,
	}
}
// FollowParams configures a Follow traversal.
type FollowParams struct {
	// Source: only one of SourceNodes/SourcePos should be specified.
	SourceNodes []*Node
	SourcePos   EdgePos
	// Distance to travel along the graph from the source.
	Distance float64
	// If true, don't search forwards.
	NoForwards bool
	// If true, search backwards (in addition to searching forwards).
	Backwards bool
	// If set, will be populated with the IDs of nodes passed while following.
	SeenNodes map[int]bool
	// If set, we will stop immediately before these nodes rather than passing them.
	StopNodes map[int]bool
}
// Follow finds the locations reached after traveling along the graph from
// the source for params.Distance. Traversal fans out at every node; each
// directed edge pair is visited at most once (seenNodePairs), so a position
// is emitted per distinct path prefix that exhausts the distance or hits a
// stop node.
func (graph *Graph) Follow(params FollowParams) []EdgePos {
	// edges already traversed, keyed by (src, dst) node IDs in either direction
	seenNodePairs := make(map[[2]int]bool)
	var positions []EdgePos
	var followNode func(node *Node, remaining float64, backwards bool)
	// followForwards advances towards the edge's destination node.
	followForwards := func(pos EdgePos, remaining float64) {
		seenNodePairs[[2]int{pos.Edge.Src.ID, pos.Edge.Dst.ID}] = true
		if pos.Position+remaining <= pos.Edge.Segment().Length() {
			// distance exhausted within this edge
			positions = append(positions, EdgePos{
				pos.Edge,
				pos.Position + remaining,
			})
		} else if params.StopNodes != nil && params.StopNodes[pos.Edge.Dst.ID] {
			// stop immediately before the stop node
			positions = append(positions, EdgePos{
				pos.Edge,
				pos.Edge.Segment().Length(),
			})
		} else {
			followNode(pos.Edge.Dst, remaining-(pos.Edge.Segment().Length()-pos.Position), false)
		}
	}
	// followBackwards advances towards the edge's source node.
	followBackwards := func(pos EdgePos, remaining float64) {
		seenNodePairs[[2]int{pos.Edge.Src.ID, pos.Edge.Dst.ID}] = true
		if remaining <= pos.Position {
			positions = append(positions, EdgePos{
				pos.Edge,
				pos.Position - remaining,
			})
		} else if params.StopNodes != nil && params.StopNodes[pos.Edge.Src.ID] {
			positions = append(positions, EdgePos{
				pos.Edge,
				0,
			})
		} else {
			followNode(pos.Edge.Src, remaining-pos.Position, true)
		}
	}
	// followNode fans out over a node's unvisited edges in the travel direction.
	followNode = func(node *Node, remaining float64, backwards bool) {
		if params.SeenNodes != nil {
			params.SeenNodes[node.ID] = true
		}
		var edges []*Edge
		if !backwards {
			edges = node.Out
		} else {
			edges = node.In
		}
		for _, edge := range edges {
			if seenNodePairs[[2]int{edge.Src.ID, edge.Dst.ID}] || seenNodePairs[[2]int{edge.Dst.ID, edge.Src.ID}] {
				continue
			}
			if !backwards {
				followForwards(EdgePos{
					edge,
					0,
				}, remaining)
			} else {
				followBackwards(EdgePos{
					edge,
					edge.Segment().Length(),
				}, remaining)
			}
		}
	}
	if len(params.SourceNodes) > 0 {
		for _, node := range params.SourceNodes {
			if !params.NoForwards {
				followNode(node, params.Distance, false)
			}
			if params.Backwards {
				followNode(node, params.Distance, true)
			}
		}
	} else {
		if !params.NoForwards {
			followForwards(params.SourcePos, params.Distance)
		}
		if params.Backwards {
			followBackwards(params.SourcePos, params.Distance)
		}
	}
	return positions
}
// A*

// AstarParams configures an Astar search.
type AstarParams struct {
	// maximum distance to travel from src (0 means unlimited)
	MaxDistance float64
}
func (graph *Graph) Astar(src *Node, dst *Node, params AstarParams) ShortestPathResult {
distances := make(map[int]float64)
remaining := make(map[int]bool)
backpointers := make(map[int]int)
scores := make(map[int]float64)
for _, node := range graph.Nodes {
distances[node.ID] = math.Inf(1)
remaining[node.ID] = true
scores[node.ID] = math.Inf(1)
}
distances[src.ID] = 0
backpointers[src.ID] = src.ID
scores[src.ID] = src.Point.Distance(dst.Point)
for len(remaining) > 0 {
var closestNode *Node
var closestDistance float64
var closestScore float64
for nodeID := range remaining {
if !math.IsInf(scores[nodeID], 1) && (closestNode == nil || scores[nodeID] < closestScore) {
closestNode = graph.Nodes[nodeID]
closestDistance = distances[nodeID]
closestScore = scores[nodeID]
}
}
if closestNode == nil {
break
}
delete(remaining, closestNode.ID)
if (params.MaxDistance != 0 && closestDistance > params.MaxDistance) || closestNode == dst {
break
}
for _, edge := range closestNode.Out {
d := closestDistance + edge.Segment().Length()
if remaining[edge.Dst.ID] && d < distances[edge.Dst.ID] {
distances[edge.Dst.ID] = d
backpointers[edge.Dst.ID] = closestNode.ID
scores[edge.Dst.ID] = d + edge.Dst.Point.Distance(dst.Point)
}
}
}
return ShortestPathResult{
source: src,
graph: graph,
Distances: distances,
Remaining: remaining,
Backpointers: backpointers,
}
} | common/graph_algo.go | 0.778481 | 0.545951 | graph_algo.go | starcoder |
package plenc
import "fmt"
// WireType represents a protobuf wire type. It's really all about how you can
// skip over fields in encoded data that aren't recognised because the field no
// longer exists in the struct.
type WireType int8

const (
	// WTVarInt signals a variable-length encoded integer. Signed integers are
	// encoded with zig-zag encoding first.
	WTVarInt WireType = iota
	// WT64 signals a 64 bit value. Used for float64
	WT64
	// WTLength signals length-value data. Length is encoded as a varint and is
	// a byte count. This is used for structs and strings, and for slices of
	// types encoded using WTVarInt, WT64 or WT32
	WTLength
	// WTSlice re-uses the code point used for the deprecated 'StartGroup' wire
	// type. It is used for slices of types implemented with WTLength. It is
	// followed by a count of items in the slice encoded as a VarUint. Each
	// entry is then encoded starting with its length encoded as a VarUint.
	WTSlice
	// wtEndGroupDeprecated keeps the protobuf 'EndGroup' code point reserved.
	wtEndGroupDeprecated
	// WT32 signals a 32 bit value. Used for float32
	WT32
)
// ReadTag reads the wire type and field index from data, also returning the
// number of bytes consumed. The tag is a varint whose low 3 bits hold the
// wire type and whose remaining bits hold the field index.
func ReadTag(data []byte) (wt WireType, index, n int) {
	tag, n := ReadVarUint(data)
	wt = WireType(tag & 0x7)
	index = int(tag >> 3)
	return wt, index, n
}
// SizeTag determines the space needed to encode a tag for the given wire type
// and field index.
func SizeTag(wt WireType, index int) int {
	tagValue := uint64(index<<3) | uint64(wt)
	return SizeVarUint(tagValue)
}
// AppendTag encodes the tag for the given wire type and field index and
// appends it to data.
func AppendTag(data []byte, wt WireType, index int) []byte {
	tagValue := uint64(index<<3) | uint64(wt)
	return AppendVarUint(data, tagValue)
}
// Skip returns the size of a data item in the encoded data
func Skip(data []byte, wt WireType) (int, error) {
switch wt {
case WTVarInt:
for i, v := range data {
if v&0x80 == 0 {
return i + 1, nil
}
if i > 9 {
return 0, fmt.Errorf("VarInt does not terminate")
}
}
return 0, fmt.Errorf("unexpected end of data. %X", data)
case WT64:
return 8, nil
case WTLength:
l, n := ReadVarUint(data)
if n < 0 {
return 0, fmt.Errorf("corrupt data for WTLength tag")
}
return int(l) + n, nil
case WTSlice:
count, n := ReadVarUint(data)
if n < 0 {
return 0, fmt.Errorf("corrupt data for WTSkip tag")
}
// We now expect count length-value encoded items
offset := n
for i := uint64(0); i < count; i++ {
l, n := ReadVarUint(data[offset:])
if n < 0 {
return 0, fmt.Errorf("corrupt length for entry %d of WTSlice", i)
}
offset += int(l) + n
}
return offset, nil
case WT32:
return 4, nil
}
return 0, fmt.Errorf("unsupported wire type %v", wt)
} | wire.go | 0.58676 | 0.545407 | wire.go | starcoder |
package query
const (
	// EqualsOperator takes two operands and tests if they are equal
	EqualsOperator eqOperator = "eq"
	// NotEqualsOperator takes two operands and tests if they are not equal
	NotEqualsOperator neOperator = "ne"
	// GreaterThanOperator takes two operands and tests if the left is greater than the right
	GreaterThanOperator gtOperator = "gt"
	// GreaterThanOrEqualOperator takes two operands and tests if the left is greater than or equal the right
	GreaterThanOrEqualOperator geOperator = "ge"
	// LessThanOperator takes two operands and tests if the left is lesser than the right
	LessThanOperator ltOperator = "lt"
	// LessThanOrEqualOperator takes two operands and tests if the left is lesser than or equal the right
	LessThanOrEqualOperator leOperator = "le"
	// InOperator takes two operands and tests if the left is contained in the right
	InOperator inOperator = "in"
	// NotInOperator takes two operands and tests if the left is not contained in the right
	NotInOperator notInOperator = "notin"
	// EqualsOrNilOperator takes two operands and tests if the left is equal to the right, or if the left is nil
	EqualsOrNilOperator enOperator = "en"
	// NoOperator is a placeholder representing "no operation".
	NoOperator noOperator = "nop"
)
// eqOperator implements the "eq" (equals) operator.
type eqOperator string

// String returns the operator's textual form.
func (o eqOperator) String() string {
	return string(o)
}

// IsNumeric reports whether the operator applies only to numeric operands.
func (eqOperator) IsNumeric() bool {
	return false
}

// Type reports the operator's arity class.
func (eqOperator) Type() OperatorType {
	return UnivariateOperator
}

// IsNullable reports whether the operator also matches nil/absent values
// (true only for the equals-or-nil operator).
func (eqOperator) IsNullable() bool {
	return false
}

// neOperator implements the "ne" (not equals) operator.
type neOperator string

// String returns the operator's textual form.
func (o neOperator) String() string {
	return string(o)
}

// Type reports the operator's arity class.
func (neOperator) Type() OperatorType {
	return UnivariateOperator
}

// IsNullable reports whether the operator also matches nil/absent values.
func (neOperator) IsNullable() bool {
	return false
}

// IsNumeric reports whether the operator applies only to numeric operands.
func (neOperator) IsNumeric() bool {
	return false
}
// gtOperator implements the "gt" (greater than) operator.
type gtOperator string

// String returns the operator token.
func (op gtOperator) String() string {
	return string(op)
}

// Type reports that the operator takes a single right-hand operand.
func (gtOperator) Type() OperatorType {
	return UnivariateOperator
}

// IsNullable reports whether the operator can match nil values.
func (gtOperator) IsNullable() bool {
	return false
}

// IsNumeric reports whether the operator requires numeric operands.
func (gtOperator) IsNumeric() bool {
	return true
}
// ltOperator implements the "lt" (less than) operator.
type ltOperator string

// String returns the operator token.
func (op ltOperator) String() string {
	return string(op)
}

// Type reports that the operator takes a single right-hand operand.
func (ltOperator) Type() OperatorType {
	return UnivariateOperator
}

// IsNullable reports whether the operator can match nil values.
func (ltOperator) IsNullable() bool {
	return false
}

// IsNumeric reports whether the operator requires numeric operands.
func (ltOperator) IsNumeric() bool {
	return true
}
// inOperator implements the "in" (contained in) operator.
type inOperator string

// String returns the operator token.
func (op inOperator) String() string {
	return string(op)
}

// Type reports that the operator takes multiple right-hand operands.
func (inOperator) Type() OperatorType {
	return MultivariateOperator
}

// IsNullable reports whether the operator can match nil values.
func (inOperator) IsNullable() bool {
	return false
}

// IsNumeric reports whether the operator requires numeric operands.
func (inOperator) IsNumeric() bool {
	return false
}
// notInOperator implements the "notin" (not contained in) operator.
type notInOperator string

// String returns the operator token.
func (op notInOperator) String() string {
	return string(op)
}

// Type reports that the operator takes multiple right-hand operands.
func (notInOperator) Type() OperatorType {
	return MultivariateOperator
}

// IsNullable reports whether the operator can match nil values.
func (notInOperator) IsNullable() bool {
	return false
}

// IsNumeric reports whether the operator requires numeric operands.
func (notInOperator) IsNumeric() bool {
	return false
}
// enOperator implements the "en" (equals or nil) operator.
type enOperator string

// String returns the operator token.
func (op enOperator) String() string {
	return string(op)
}

// Type reports that the operator takes a single right-hand operand.
func (enOperator) Type() OperatorType {
	return UnivariateOperator
}

// IsNullable reports whether the operator can match nil values.
func (enOperator) IsNullable() bool {
	return true
}

// IsNumeric reports whether the operator requires numeric operands.
func (enOperator) IsNumeric() bool {
	return false
}
// geOperator implements the "ge" (greater than or equal) operator.
type geOperator string

// String returns the operator token.
func (op geOperator) String() string {
	return string(op)
}

// Type reports that the operator takes a single right-hand operand.
func (geOperator) Type() OperatorType {
	return UnivariateOperator
}

// IsNullable reports whether the operator can match nil values.
func (geOperator) IsNullable() bool {
	return false
}

// IsNumeric reports whether the operator requires numeric operands.
func (geOperator) IsNumeric() bool {
	return true
}
// leOperator implements the "le" (less than or equal) operator.
type leOperator string

// String returns the operator token.
func (op leOperator) String() string {
	return string(op)
}

// Type reports that the operator takes a single right-hand operand.
func (leOperator) Type() OperatorType {
	return UnivariateOperator
}

// IsNullable reports whether the operator can match nil values.
func (leOperator) IsNullable() bool {
	return false
}

// IsNumeric reports whether the operator requires numeric operands.
func (leOperator) IsNumeric() bool {
	return true
}
type noOperator string
func (o noOperator) String() string {
return string(o)
}
func (noOperator) Type() OperatorType {
return MultivariateOperator
}
func (noOperator) IsNullable() bool {
return false
}
func (noOperator) IsNumeric() bool {
return false
} | pkg/query/operators.go | 0.854263 | 0.894605 | operators.go | starcoder |
Coding Exercise #1
There is an error in the following Go program. Even though the goroutine is correctly launched, it doesn't print any message.
package main
import (
"fmt"
)
func sayHello(n string) {
fmt.Printf("Hello, %s!\n", n)
}
func main() {
go sayHello("Mr. Wick")
}
Your task is to synchronize main and the goroutine using WaitGroups. The program should print the string received as argument by sayHello().
Are you stuck? Do you want to see the solution for this exercise? Click https://play.golang.org/p/UfQrVmlwrvS.
Coding Exercise #2
1. Create a function called sum() that calculates and then prints out the sum of 2 float numbers it receives as arguments.
Format the result with 2 decimal points.
2. From main launch 3 goroutines that execute the function you have just created (sum)
3. Synchronize the goroutines and the main function using WaitGroups
Are you stuck? Do you want to see the solution for this exercise? Click https://play.golang.org/p/2ArrVTSj55g.
Coding Exercise #3
1. Create an anonymous function that calculates and prints out the square root of a float value it receives as argument.
2. Launch the function as a goroutine and synchronize it with main using WaitGroups
Note: You calculate the square root of a float named f using the Sqrt() function from math package like this:
x := math.Sqrt(f)
fmt.Println(x)
Are you stuck? Do you want to see the solution for this exercise? Click https://play.golang.org/p/4Z_oVsvLyk1.
Coding Exercise #4
Change the code from Exercise #3 and launch 50 goroutines that calculate concurrently the square root of all the numbers between 100 and 149 (both included).
Are you stuck? Do you want to see the solution for this exercise? Click https://play.golang.org/p/W0bUu1wD9Df.
Coding Exercise #5
You work on a banking application and have created 2 functions: one that deposits a value into an account and another that withdraws a value from the account.
You want to simulate many deposits and withdraws that take place simultaneously and start some goroutines.
During testing you notice that a data race occurred.
Your task is to change the code in order to protect the accounts balance from simultaneously writing using a mutex.
This is the initial program that has errors:
package main
import (
"fmt"
"sync"
)
func deposit(b *int, n int, wg *sync.WaitGroup) {
*b += n
wg.Done()
}
func withdraw(b *int, n int, wg *sync.WaitGroup) {
*b -= n
wg.Done()
}
func main() {
var wg sync.WaitGroup
wg.Add(200)
balance := 100
for i := 0; i < 100; i++ {
go deposit(&balance, i, &wg)
go withdraw(&balance, i, &wg)
}
wg.Wait()
fmt.Println("Final balance value:", balance)
}
Are you stuck? Do you want to see the solution for this exercise? Click https://play.golang.org/p/R6Cn4td3Z_R. | more_code/coding_tasks/concurrency/gorutines_waitgroups_mutexes.go | 0.665737 | 0.58053 | gorutines_waitgroups_mutexes.go | starcoder |
package common
// Tileset describes a set of tiles belonging to a Map, together with the
// shared dimensions, spacing and drawing offsets used to render them.
type Tileset struct {
	parent     *Map
	FirstGID   int // the global id (across all tilesets) for the first tile in this set
	Name       string
	OffsetX    int
	OffsetY    int
	TileWidth  int
	TileHeight int
	Spacing    int
	Margin     int
	terrain    []*Terrain
	properties map[string]*Property
	tiles      []*Tile
}

// Tiles returns the tiles contained in the tileset. The returned slice is
// the tileset's internal storage; callers should not modify it.
func (t *Tileset) Tiles() []*Tile {
	return t.tiles
}

// AddTerrain appends the given terrain definitions to the tileset, assigning
// each a sequential id based on its position.
func (t *Tileset) AddTerrain(in ...*Terrain) {
	for _, ter := range in {
		ter.Id = len(t.terrain)
		t.terrain = append(t.terrain, ter)
	}
}

// Terrain returns the terrain definitions registered with the tileset.
func (t *Tileset) Terrain() []*Terrain {
	return t.terrain
}

// UpdateProperties inserts or replaces the given properties, keyed by name.
func (t *Tileset) UpdateProperties(props ...*Property) {
	for _, prop := range props {
		t.properties[prop.Name()] = prop
	}
}

// Property returns the named property and whether it exists.
func (t *Tileset) Property(name string) (*Property, bool) {
	prop, ok := t.properties[name]
	return prop, ok
}

// Properties returns all of the tileset's properties in unspecified order.
func (t *Tileset) Properties() []*Property {
	// Pre-size the result to avoid repeated slice growth.
	results := make([]*Property, 0, len(t.properties))
	for _, p := range t.properties {
		results = append(results, p)
	}
	return results
}

// TileCount returns the number of tiles in the tileset.
func (t *Tileset) TileCount() int {
	return len(t.tiles)
}

// AddTiles appends the given tiles to the tileset and adopts each one as a
// child. Tiles without a Source image are silently skipped.
func (t *Tileset) AddTiles(tiles ...*Tile) {
	for _, tile := range tiles {
		if tile.Source == "" {
			continue
		}
		tile.parent = t
		t.tiles = append(t.tiles, tile)
	}
}
// NewTileset creates a tileset attached to the map m, inheriting the map's
// tile dimensions, registers it with the map, and adds the given tiles.
func (m *Map) NewTileset(name string, tiles ...*Tile) *Tileset {
	ts := &Tileset{
		parent:     m,
		Name:       name,
		TileWidth:  m.TileWidth,
		TileHeight: m.TileHeight,
		properties: map[string]*Property{},
		terrain:    []*Terrain{},
		tiles:      []*Tile{},
	}
	m.tilesets = append(m.tilesets, ts)
	ts.AddTiles(tiles...)
	return ts
}
// Tile is a single tile image within a Tileset. The terrain slice holds the
// terrain type at each of the tile's four corners, indexed as:
// 0 = top-left, 1 = top-right, 2 = bottom-left, 3 = bottom-right.
type Tile struct {
	parent      *Tileset
	terrain     []*Terrain
	properties  map[string]*Property
	Id          int
	Type        string
	Source      string
	Width       int
	Height      int
	Probability float64
	Animation   *Animation
}

// NewTile creates a tile backed by the given source image path.
func NewTile(source string) *Tile {
	return &Tile{
		Source:     source,
		properties: make(map[string]*Property),
		terrain:    make([]*Terrain, 4),
	}
}

// GlobalID returns the tile's id offset by its tileset's first global id,
// making the id unique across all tilesets of the map.
func (t *Tile) GlobalID() int {
	return t.Id + t.parent.FirstGID
}

// TopLeftTerrain returns the terrain at the tile's top-left corner.
func (t *Tile) TopLeftTerrain() *Terrain {
	return t.terrain[0]
}

// TopRightTerrain returns the terrain at the tile's top-right corner.
func (t *Tile) TopRightTerrain() *Terrain {
	return t.terrain[1]
}

// BottomLeftTerrain returns the terrain at the tile's bottom-left corner.
func (t *Tile) BottomLeftTerrain() *Terrain {
	return t.terrain[2]
}

// BottomRightTerrain returns the terrain at the tile's bottom-right corner.
func (t *Tile) BottomRightTerrain() *Terrain {
	return t.terrain[3]
}

// SetTopLeftTerrain sets the terrain at the tile's top-left corner.
func (t *Tile) SetTopLeftTerrain(in *Terrain) {
	t.terrain[0] = in
}

// SetTopRightTerrain sets the terrain at the tile's top-right corner.
func (t *Tile) SetTopRightTerrain(in *Terrain) {
	t.terrain[1] = in
}

// SetBottomLeftTerrain sets the terrain at the tile's bottom-left corner.
func (t *Tile) SetBottomLeftTerrain(in *Terrain) {
	t.terrain[2] = in
}

// SetBottomRightTerrain sets the terrain at the tile's bottom-right corner.
func (t *Tile) SetBottomRightTerrain(in *Terrain) {
	t.terrain[3] = in
}

// UpdateProperties inserts or replaces the given properties, keyed by name.
func (t *Tile) UpdateProperties(props ...*Property) {
	for _, prop := range props {
		t.properties[prop.Name()] = prop
	}
}

// Terrain returns the tile's four corner terrains in the order top-left,
// top-right, bottom-left, bottom-right.
func (t *Tile) Terrain() []*Terrain {
	return t.terrain
}

// Property returns the named property and whether it exists.
func (t *Tile) Property(name string) (*Property, bool) {
	prop, ok := t.properties[name]
	return prop, ok
}

// Properties returns all of the tile's properties in unspecified order.
func (t *Tile) Properties() []*Property {
	// Pre-size the result to avoid repeated slice growth.
	results := make([]*Property, 0, len(t.properties))
	for _, p := range t.properties {
		results = append(results, p)
	}
	return results
}

// SetAnimation replaces the tile's animation with one built from the given
// frames and adopts the new animation as a child of this tile.
func (t *Tile) SetAnimation(frames ...*Frame) {
	animation := newAnimation(frames...)
	t.Animation = animation
	animation.parent = t
}
// Animation is a sequence of frames attached to a tile.
type Animation struct {
	parent *Tile
	Frames []*Frame
}

// Frame is a single step in an animation: the tile to display and the
// duration to display it for.
type Frame struct {
	parent   *Animation
	Tile     *Tile
	Duration int
}

// newAnimation builds an animation from the given frames, adopting each
// frame as a child of the new animation.
func newAnimation(frames ...*Frame) *Animation {
	anim := &Animation{Frames: []*Frame{}}
	for _, frame := range frames {
		frame.parent = anim
		anim.Frames = append(anim.Frames, frame)
	}
	return anim
}
type Terrain struct {
Id int
Name string
Tile *Tile
properties map[string]*Property
}
func NewTerrain(name string) *Terrain {
return &Terrain{
Name: name,
properties: make(map[string]*Property),
}
}
func (t *Terrain) UpdateProperties(props ...*Property) {
for _, prop := range props {
t.properties[prop.Name()] = prop
}
}
func (t *Terrain) Property(name string) (*Property, bool) {
prop, ok := t.properties[name]
return prop, ok
}
func (t *Terrain) Properties() []*Property {
results := []*Property{}
for _, p := range t.properties {
results = append(results, p)
}
return results
} | common/tileset.go | 0.681727 | 0.416322 | tileset.go | starcoder |
package main
import (
"fmt"
"strings"
"github.com/go-gl/gl/v4.2-core/gl"
"github.com/hexaflex/wireworldgpu/math"
"github.com/pkg/errors"
)
// Shader defines a compiled and linked shader program.
type Shader uint32

// Release cleans up the shader program.
func (s Shader) Release() {
	// The handle wrapped by Shader is a program object (it comes from
	// gl.CreateProgram in compile), so it must be deleted with DeleteProgram
	// rather than DeleteShader.
	gl.DeleteProgram(uint32(s))
}

// SetUniformMat4 sets the given uniform to the specified matrix value.
func (s Shader) SetUniformMat4(name string, mat math.Mat4) {
	gl.UniformMatrix4fv(s.uniform(name), 1, false, &mat[0])
}

// SetUniformVec2 sets the given uniform to the specified value.
func (s Shader) SetUniformVec2(name string, v math.Vec2) {
	gl.Uniform2fv(s.uniform(name), 1, &v[0])
}

// SetUniformVec3 sets the given uniform to the specified value.
func (s Shader) SetUniformVec3(name string, v math.Vec3) {
	gl.Uniform3fv(s.uniform(name), 1, &v[0])
}

// SetUniformVec4 sets the given uniform to the specified value.
func (s Shader) SetUniformVec4(name string, v math.Vec4) {
	gl.Uniform4fv(s.uniform(name), 1, &v[0])
}

// uniform returns the location of the named uniform in the program. The name
// is passed to GL as a NUL-terminated string.
func (s Shader) uniform(name string) int32 {
	return gl.GetUniformLocation(uint32(s), gl.Str(name+"\x00"))
}

// Use makes the shader program current.
func (s Shader) Use() {
	gl.UseProgram(uint32(s))
}

// Unuse unbinds any current shader program.
func (s Shader) Unuse() {
	gl.UseProgram(0)
}
// compile builds a shader program from the given sources. Empty sources are
// skipped, so any combination of vertex/geometry/fragment stages may be
// supplied. The individual stage shaders are deleted (via defer) once they
// have been attached and linked into the program.
func compile(vertex, geometry, fragment string) (Shader, error) {
	var vs, gs, fs uint32
	var err error

	if len(vertex) > 0 {
		vs, err = compileShader(vertex, gl.VERTEX_SHADER)
		if err != nil {
			return 0, errors.Wrapf(err, "failed to compile vertex shader")
		}
		defer gl.DeleteShader(vs)
	}

	if len(geometry) > 0 {
		gs, err = compileShader(geometry, gl.GEOMETRY_SHADER)
		if err != nil {
			return 0, errors.Wrapf(err, "failed to compile geometry shader")
		}
		defer gl.DeleteShader(gs)
	}

	if len(fragment) > 0 {
		fs, err = compileShader(fragment, gl.FRAGMENT_SHADER)
		if err != nil {
			return 0, errors.Wrapf(err, "failed to compile fragment shader")
		}
		defer gl.DeleteShader(fs)
	}

	program := gl.CreateProgram()
	if len(vertex) > 0 {
		gl.AttachShader(program, vs)
	}
	if len(geometry) > 0 {
		gl.AttachShader(program, gs)
	}
	if len(fragment) > 0 {
		gl.AttachShader(program, fs)
	}
	gl.LinkProgram(program)

	var status int32
	gl.GetProgramiv(program, gl.LINK_STATUS, &status)
	if status == gl.FALSE {
		var logLength int32
		gl.GetProgramiv(program, gl.INFO_LOG_LENGTH, &logLength)
		log := strings.Repeat("\x00", int(logLength+1))
		gl.GetProgramInfoLog(program, logLength, nil, gl.Str(log))
		// Delete the program so it is not leaked on link failure.
		gl.DeleteProgram(program)
		return 0, fmt.Errorf("failed to link program: %v", log)
	}
	return Shader(program), nil
}
// compileShader compiles the given shader source into a Shader.
func compileShader(source string, stype uint32) (uint32, error) {
shader := gl.CreateShader(stype)
csources, free := gl.Strs(source + "\x00")
gl.ShaderSource(shader, 1, csources, nil)
free()
gl.CompileShader(shader)
var status int32
gl.GetShaderiv(shader, gl.COMPILE_STATUS, &status)
if status == gl.FALSE {
var logLength int32
gl.GetShaderiv(shader, gl.INFO_LOG_LENGTH, &logLength)
log := strings.Repeat("\x00", int(logLength+1))
gl.GetShaderInfoLog(shader, logLength, nil, gl.Str(log))
return 0, fmt.Errorf("failed to compile %v: %v", source, log)
}
return shader, nil
} | shader.go | 0.712132 | 0.445409 | shader.go | starcoder |
package gfx
import (
"math/rand"
"runtime"
"time"
)
const (
	// defaultTargetFrameRate is the target frame rate applied by Init until
	// the application calls SetTargetFrameRate.
	defaultTargetFrameRate = 60
)

// Package-level state initialized by Init and shared with the accessor
// functions and the run loop.
var (
	width float64
	height float64
	scaleX float64
	scaleY float64
	fps int
	isFixed bool
	targetFrameRate int
	targetFrameTime float64
)

// Application defines the interface that must be implemented by an application using the graphics interface.
type Application interface {
	// Load is called before the application loop starts. Can be used to preload assets like textures etc.
	Load()
	// Update is called for each update frame of the application. The d argument is the time delta in seconds since the last call to update.
	Update(d float64)
	// Unload is called when the application shuts down. Can be used to cleanup/flush any open resources.
	Unload()
}
// Font represents a raster font that can be used to render text.
// Glyphs cover the contiguous byte range [FirstChar, LastChar].
type Font struct {
	W, H int // presumably the glyph cell size in pixels — TODO confirm
	FirstChar, LastChar byte
	data []byte // raw glyph bitmap data
}
// Init initializes the graphics system, creates the platform specific window
// and related graphics devices. It returns false if either the window or the
// rendering device could not be created.
//
// w and h are the unscaled surface dimensions in pixels; xscale and yscale
// are the integer factors the surface is scaled by when presented.
func Init(title string, x, y, w, h, xscale, yscale int) bool {
	// Platform event loops generally must run on the main OS thread, so pin
	// the calling goroutine to it before any window/device creation.
	runtime.LockOSThread()
	width = float64(w)
	height = float64(h)
	scaleX = float64(xscale)
	scaleY = float64(yscale)
	isFixed = false
	SetTargetFrameRate(defaultTargetFrameRate)
	iomgr = &ioManager{}
	driver.Init()
	if !driver.CreateWindow(x, y, w, h, xscale, yscale) {
		return false
	}
	if !driver.CreateDevice() {
		return false
	}
	driver.SetWindowTitle(title)
	return true
}
// Run starts the application running and executes the platform specific event
// loop. This function blocks until the event loop terminates. The update loop
// (see run) is started on a separate goroutine while the calling thread is
// occupied by the platform event loop; Unload is called once the event loop
// returns.
func Run(app Application) {
	app.Load()
	go run(app)
	driver.StartEventLoop()
	app.Unload()
}
// Width returns the pixel width of graphics surface. This is an unscaled value.
func Width() float64 {
	return width
}

// Height returns the pixel height of the graphics surface. This is an unscaled value.
func Height() float64 {
	return height
}

// Fps returns the number of update frames executed in the last second.
// The value is recomputed on each rendered frame by the run loop.
func Fps() int {
	return fps
}

// EnableFixedFrameRate enables or disables fixed frame rate mode. When
// enabled, the run loop only updates/renders the application once enough
// time has accumulated to cover one target frame.
func EnableFixedFrameRate(state bool) {
	isFixed = state
}

// SetTargetFrameRate sets the desired target frame rate (in frames per
// second) used when fixed frame rate is enabled.
func SetTargetFrameRate(fps int) {
	targetFrameRate = fps
	targetFrameTime = 1.0 / float64(fps)
}

// TargetFrameRate gets the current target frame rate.
func TargetFrameRate() int {
	return targetFrameRate
}
// Clear clears the graphics surface using the specified color.
func Clear(c Color) {
	driver.Clear(c)
}

// SetPixel draws a pixel at the specified coordinates using the passed color.
// Coordinates are rounded to the nearest pixel (for non-negative values).
func SetPixel(x, y float64, c Color) {
	driver.SetPixel(int(x+0.5), int(y+0.5), c)
}

// KeyPressed returns true if the passed key is currently pressed. KeyPressed can also be used to check the state of the mouse buttons.
func KeyPressed(key Key) bool {
	return iomgr.keyPressed(key)
}

// KeyJustPressed returns true if the key was just pressed. This will not continue to return true if the key is held down.
// KeyJustPressed can be used to check the state of the mouse buttons.
// This can be used for one shot key presses, that require the key to be released and repressed for each interaction.
func KeyJustPressed(key Key) bool {
	return iomgr.keyJustPressed(key)
}

// MouseXY returns the coordinates of the mouse.
func MouseXY() (float64, float64) {
	return iomgr.mouseXY()
}
// Color represents a color packed into a uint32 as 0xAARRGGBB.
type Color uint32

// Rgb creates a fully opaque color using the specified RGB color components.
// Each component must be a value between 0 and 255.
func Rgb(r, g, b int) Color {
	return Color((uint32(0xff) << 24) | (uint32(r) << 16) | (uint32(g) << 8) | uint32(b))
}

// Rgba creates a new color using the specified RGBA color components.
// Each component must be a value between 0 and 255.
func Rgba(r, g, b, a int) Color {
	return Color((uint32(a) << 24) | (uint32(r) << 16) | (uint32(g) << 8) | uint32(b))
}

// R returns the red component of the color.
func (c Color) R() int {
	return (int(c) >> 16) & 0xff
}

// G returns the green component of the color.
func (c Color) G() int {
	return (int(c) >> 8) & 0xff
}

// B returns the blue component of the color.
func (c Color) B() int {
	return int(c) & 0xff
}

// A returns the alpha component of the color.
func (c Color) A() int {
	return (int(c) >> 24) & 0xff
}

// RandomColor returns a random opaque color. rand.Intn returns values in the
// half-open interval [0, n), so 256 is required for 255 to be reachable.
func RandomColor() Color {
	return Rgb(rand.Intn(256), rand.Intn(256), rand.Intn(256))
}

// clamp limits v to the inclusive range [min, max].
func clamp(v, min, max int) int {
	if v < min {
		return min
	}
	if v > max {
		return max
	}
	return v
}

// Add combines two colors component-wise, clamping each channel at 255, and
// returns the resulting (fully opaque) color.
func (c Color) Add(o Color) Color {
	return Rgb(clamp(c.R()+o.R(), 0, 255),
		clamp(c.G()+o.G(), 0, 255),
		clamp(c.B()+o.B(), 0, 255))
}

// Blend blends the color with a second color, weighting each channel of c by
// c's alpha (+1) and each channel of o by 256 minus c's alpha, then scaling
// back with a right shift.
func (c Color) Blend(o Color) Color {
	a := c.A() + 1
	ia := 256 - c.A()
	r := (a*c.R() + ia*o.R()) >> 8
	g := (a*c.G() + ia*o.G()) >> 8
	b := (a*c.B() + ia*o.B()) >> 8
	na := (a*c.A() + ia*o.A()) >> 8
	return Rgba(r, g, b, na)
}
// Predefined colors, including the ZX Spectrum and Commodore 64 palettes.
var (
	Transparent = Rgba(0, 0, 0, 0)
	Black = Rgb(0, 0, 0)
	BrightBlue = Rgb(0, 0, 255)
	BrightGreen = Rgb(0, 255, 0)
	BrightCyan = Rgb(0, 255, 255)
	BrightRed = Rgb(255, 0, 0)
	BrightMagenta = Rgb(255, 0, 255)
	BrightYellow = Rgb(255, 255, 0)
	White = Rgb(255, 255, 255)
	Blue = Rgb(0, 0, 192)
	Green = Rgb(0, 192, 0)
	Cyan = Rgb(0, 192, 192)
	Red = Rgb(192, 0, 0)
	Magenta = Rgb(192, 0, 192)
	Yellow = Rgb(192, 192, 0)
	Grey = Rgb(192, 192, 192)
	DarkBlue = Rgb(0, 0, 128)
	DarkGreen = Rgb(0, 128, 0)
	DarkCyan = Rgb(0, 128, 128)
	DarkRed = Rgb(128, 0, 0)
	DarkMagenta = Rgb(128, 0, 128)
	DarkYellow = Rgb(128, 128, 0)
	DarkGrey = Rgb(128, 128, 128)
	VeryDarkBlue = Rgb(0, 0, 64)
	VeryDarkGreen = Rgb(0, 64, 0)
	VeryDarkCyan = Rgb(0, 64, 64)
	VeryDarkRed = Rgb(64, 0, 0)
	VeryDarkMagenta = Rgb(64, 0, 64)
	VeryDarkYellow = Rgb(64, 64, 0)
	VeryDarkGrey = Rgb(64, 64, 64)
	// ZX Spectrum palette; the trailing number is the palette index.
	ZXBlack = Black // 0
	ZXBlue = Rgb(0, 0, 205) // 1
	ZXRed = Rgb(205, 0, 0) // 2
	ZXMagenta = Rgb(205, 0, 205) // 3
	ZXGreen = Rgb(0, 205, 0) // 4
	ZXCyan = Rgb(0, 205, 205) // 5
	ZXYellow = Rgb(205, 205, 0) // 6
	ZXWhite = Rgb(205, 205, 205) // 7
	ZXBrightBlack = Black // 0 + Bright
	ZXBrightBlue = BrightBlue // 1 + Bright
	ZXBrightRed = BrightRed // 2 + Bright
	ZXBrightMagenta = BrightMagenta // 3 + Bright
	ZXBrightGreen = BrightGreen // 4 + Bright
	ZXBrightCyan = BrightCyan // 5 + Bright
	ZXBrightYellow = BrightYellow // 6 + Bright
	ZXBrightWhite = White // 7 + Bright
	// Commodore 64 palette; the trailing number is the palette index.
	C64Black = Black // 0
	C64White = White // 1
	C64Red = Rgb(146, 74, 64) // 2
	C64Cyan = Rgb(132, 197, 204) // 3
	C64Purple = Rgb(147, 81, 182) // 4
	C64Green = Rgb(114, 177, 75) // 5
	C64Blue = Rgb(72, 58, 170) // 6
	C64Yellow = Rgb(213, 223, 124) // 7
	C64Orange = Rgb(153, 105, 45) // 8
	C64Brown = Rgb(103, 82, 0) // 9
	C64LightRed = Rgb(193, 129, 120) // 10
	C64DarkGrey = Rgb(96, 96, 96) // 11
	C64Grey = Rgb(138, 138, 138) // 12
	C64LightGreen = Rgb(179, 236, 145) // 13
	C64LightBlue = Rgb(134, 122, 222) // 14
	C64LightGrey = Rgb(179, 179, 179) // 15
)
// run is the application update/render loop, launched by Run on its own
// goroutine. Each iteration advances the driver and the input manager; the
// application is updated and rendered either every iteration (variable frame
// rate) or only once enough time has accumulated to cover one target frame
// (fixed frame rate). The loop exits when shutdown clears the running flag.
func run(app Application) {
	running = true
	lastUpdate := time.Now()
	lastRender := time.Now()
	frameTimer := 0.0
	for running {
		startUpdate := time.Now()
		elapsedTime := startUpdate.Sub(lastUpdate).Seconds()
		lastUpdate = startUpdate
		frameTimer += elapsedTime
		driver.Update(elapsedTime)
		iomgr.update(elapsedTime)
		if !isFixed || frameTimer >= targetFrameTime {
			startRender := time.Now()
			// delta is the time since the previous rendered frame, which can
			// span several loop iterations in fixed frame rate mode.
			delta := startRender.Sub(lastRender).Seconds()
			lastRender = startRender
			app.Update(delta)
			driver.Render(delta)
			// Round to the nearest whole frames-per-second value.
			fps = (int)(1.0/delta + 0.5)
			// NOTE(review): frameTimer is decremented even when isFixed is
			// false; harmless since the timer is only consulted in fixed
			// mode, but confirm this is intentional.
			frameTimer -= targetFrameTime
		}
	}
}
// shutdown stops the application loop started by run.
func shutdown() {
	running = false
}
package numint
// GaussLegendre returns a Gauss-Legendre quadrature rule of the specified
// degree. The rules are backed by hard-coded constants and are implemented
// for degrees 1 through 12; any other degree yields an empty rule.
func GaussLegendre(degree int) *GaussLegendreRule {
	if degree < 1 || degree > len(weightValues) {
		return &GaussLegendreRule{}
	}
	ws := weightValues[degree-1]
	xs := xValues[degree-1]
	// The tables store only the non-negative half of each symmetric rule;
	// expand them into the full point/weight lists.
	if degree%2 == 0 {
		return &GaussLegendreRule{
			abscissae:    expandEvenAbsiccae(xs),
			weightCoeffs: expandEvenWeights(ws),
		}
	}
	return &GaussLegendreRule{
		abscissae:    expandOddAbsiccae(xs),
		weightCoeffs: expandOddWeights(ws),
	}
}
// GaussLegendreRule provides Weights and Points functions for Gauss-Legendre
// quadrature rules. The abscissae and weight coefficients are stored for the
// reference interval [-1, 1] and mapped to [a, b] on demand.
type GaussLegendreRule struct {
	abscissae, weightCoeffs []float64
}

// Weights returns the quadrature weights to use for the interval [a, b].
// The number of weights returned depends on the degree of the rule.
// A value receiver is used for consistency with Points.
func (g GaussLegendreRule) Weights(a float64, b float64) []float64 {
	weights := make([]float64, len(g.weightCoeffs))
	for i := range g.weightCoeffs {
		// Scale by the Jacobian of the affine map from [-1, 1] to [a, b].
		weights[i] = g.weightCoeffs[i] * (b - a) / 2
	}
	return weights
}

// Points returns the quadrature sampling points to use for the interval [a, b].
// The number of points returned depends on the degree of the rule.
func (g GaussLegendreRule) Points(a float64, b float64) []float64 {
	points := make([]float64, len(g.abscissae))
	for i := range g.abscissae {
		// Affine map of the reference abscissa from [-1, 1] to [a, b].
		points[i] = g.abscissae[i]*(b-a)/2 + (b+a)/2
	}
	return points
}
// source: http://www.holoborodko.com/pavel/numerical-methods/numerical-integration/
// Because Gauss-Legendre rules are symmetric about 0, only the non-negative
// abscissae are stored; row n holds the values for the degree-n rule and is
// expanded to the full point list by the expand* helpers.
var xValues = [][]float64{
	{0}, // n=1
	{0.5773502691896257645091488}, // n=2
	{0.0000000000000000000000000, 0.7745966692414833770358531}, // n=3
	{0.3399810435848562648026658, 0.8611363115940525752239465}, // n=4
	{0.0000000000000000000000000, 0.5384693101056830910363144, 0.9061798459386639927976269}, // n=5
	{0.2386191860831969086305017, 0.6612093864662645136613996, 0.9324695142031520278123016}, // n=6
	{0.0000000000000000000000000, 0.4058451513773971669066064, 0.7415311855993944398638648, 0.9491079123427585245261897}, // n=7
	{0.1834346424956498049394761, 0.5255324099163289858177390, 0.7966664774136267395915539, 0.9602898564975362316835609}, // n=8
	{0.0000000000000000000000000, 0.3242534234038089290385380, 0.6133714327005903973087020, 0.8360311073266357942994298, 0.9681602395076260898355762}, // n=9
	{0.1488743389816312108848260, 0.4333953941292471907992659, 0.6794095682990244062343274, 0.8650633666889845107320967, 0.9739065285171717200779640}, // n=10
	{0.0000000000000000000000000, 0.2695431559523449723315320, 0.5190961292068118159257257, 0.7301520055740493240934163, 0.8870625997680952990751578, 0.9782286581460569928039380}, // n=11
	{0.1252334085114689154724414, 0.3678314989981801937526915, 0.5873179542866174472967024, 0.7699026741943046870368938, 0.9041172563704748566784659, 0.9815606342467192506905491}, // n=12
}

// weightValues holds the weights matching xValues row-for-row; each stored
// weight applies to both the positive abscissa and its negated mirror.
var weightValues = [][]float64{
	{2}, // n=1
	{1}, // n=2
	{0.8888888888888888888888889, 0.5555555555555555555555556}, // n=3
	{0.6521451548625461426269361, 0.3478548451374538573730639}, // n=4
	{0.5688888888888888888888889, 0.4786286704993664680412915, 0.2369268850561890875142640}, // n=5
	{0.4679139345726910473898703, 0.3607615730481386075698335, 0.1713244923791703450402961}, // n=6
	{0.4179591836734693877551020, 0.3818300505051189449503698, 0.2797053914892766679014678, 0.1294849661688696932706114}, // n=7
	{0.3626837833783619829651504, 0.3137066458778872873379622, 0.2223810344533744705443560, 0.1012285362903762591525314}, // n=8
	{0.3302393550012597631645251, 0.3123470770400028400686304, 0.2606106964029354623187429, 0.1806481606948574040584720, 0.0812743883615744119718922}, // n=9
	{0.2955242247147528701738930, 0.2692667193099963550912269, 0.2190863625159820439955349, 0.1494513491505805931457763, 0.0666713443086881375935688}, // n=10
	{0.2729250867779006307144835, 0.2628045445102466621806889, 0.2331937645919904799185237, 0.1862902109277342514260976, 0.1255803694649046246346943, 0.0556685671161736664827537}, // n=11
	{0.2491470458134027850005624, 0.2334925365383548087608499, 0.2031674267230659217490645, 0.1600783285433462263346525, 0.1069393259953184309602547, 0.0471753363865118271946160}, // n=12
}
// expandEvenAbsiccae expands the stored non-negative abscissae of an
// even-degree rule into the full symmetric list: the negated values in
// descending magnitude followed by the stored values.
func expandEvenAbsiccae(abs []float64) []float64 {
	result := make([]float64, 0, 2*len(abs))
	for i := len(abs) - 1; i >= 0; i-- {
		result = append(result, -abs[i])
	}
	return append(result, abs...)
}

// expandEvenWeights mirrors the stored weights of an even-degree rule into
// the full list matching the expanded abscissae.
func expandEvenWeights(ws []float64) []float64 {
	result := make([]float64, 0, 2*len(ws))
	for i := len(ws) - 1; i >= 0; i-- {
		result = append(result, ws[i])
	}
	return append(result, ws...)
}

// expandOddAbsiccae expands the stored abscissae of an odd-degree rule
// (whose first entry is the center point) into the full symmetric list;
// the center point appears exactly once.
func expandOddAbsiccae(abs []float64) []float64 {
	result := make([]float64, 0, 2*len(abs)-1)
	for i := len(abs) - 1; i >= 1; i-- {
		result = append(result, -abs[i])
	}
	return append(result, abs...)
}

// expandOddWeights mirrors the stored weights of an odd-degree rule into the
// full list matching the expanded abscissae; the center weight appears once.
func expandOddWeights(weights []float64) []float64 {
	result := make([]float64, 0, 2*len(weights)-1)
	for i := len(weights) - 1; i >= 1; i-- {
		result = append(result, weights[i])
	}
	return append(result, weights...)
}
package geom
import (
"fmt"
"math"
"github.com/peterstace/simplefeatures/rtree"
)
// line represents a line segment between two XY locations. It's an invariant
// that a and b are distinct XY values. Do not create a line that has the same
// a and b value.
type line struct {
	a, b XY
}

// uncheckedEnvelope directly constructs an Envelope that bounds the line. It
// skips envelope validation because line coordinates never come directly from
// users. Instead, line coordinates come directly from pre-validated
// LineStrings, or from operations on pre-validated geometries.
func (ln line) uncheckedEnvelope() Envelope {
	// Order each coordinate pair so a/b become the min/max corners
	// (presumably sortFloat64Pair returns min then max — the Min/Max usage
	// in box below relies on the same convention). ln is a value receiver,
	// so the caller's line is unchanged.
	ln.a.X, ln.b.X = sortFloat64Pair(ln.a.X, ln.b.X)
	ln.a.Y, ln.b.Y = sortFloat64Pair(ln.a.Y, ln.b.Y)
	return newUncheckedEnvelope(ln.a, ln.b)
}

// box returns the axis-aligned bounding box of the line in the form used by
// the rtree package.
func (ln line) box() rtree.Box {
	ln.a.X, ln.b.X = sortFloat64Pair(ln.a.X, ln.b.X)
	ln.a.Y, ln.b.Y = sortFloat64Pair(ln.a.Y, ln.b.Y)
	return rtree.Box{
		MinX: ln.a.X,
		MinY: ln.a.Y,
		MaxX: ln.b.X,
		MaxY: ln.b.Y,
	}
}
// length returns the Euclidean length of the segment from a to b.
func (ln line) length() float64 {
	d := ln.b.Sub(ln.a)
	return math.Sqrt(d.X*d.X + d.Y*d.Y)
}
// centroid returns the midpoint of the line segment.
func (ln line) centroid() XY {
	return XY{
		0.5 * (ln.a.X + ln.b.X),
		0.5 * (ln.a.Y + ln.b.Y),
	}
}

// asLineString converts the line into a two-point LineString. It panics if
// the LineString cannot be constructed, which is not expected for a valid
// line because its endpoints are distinct by invariant.
func (ln line) asLineString() LineString {
	ls, err := NewLineString(NewSequence([]float64{
		ln.a.X, ln.a.Y,
		ln.b.X, ln.b.Y,
	}, DimXY))
	if err != nil {
		// Should not occur, because we know that a and b are distinct.
		panic(fmt.Sprintf("could not create line string: %v", err))
	}
	return ls
}
// intersectsXY reports whether the point xy lies on the line segment.
func (ln line) intersectsXY(xy XY) bool {
	// Speed is O(1) using a bounding box check then a point-on-line check.
	env := ln.uncheckedEnvelope()
	if !env.Contains(xy) {
		return false
	}
	// Cross-product test: the point lies on the infinite line through a and
	// b exactly when the cross product of (xy - a) and (b - a) is zero; the
	// envelope check above restricts this to the segment itself.
	lhs := (xy.X - ln.a.X) * (ln.b.Y - ln.a.Y)
	rhs := (xy.Y - ln.a.Y) * (ln.b.X - ln.a.X)
	return lhs == rhs
}

// hasEndpoint reports whether xy coincides with either endpoint of the line.
func (ln line) hasEndpoint(xy XY) bool {
	return ln.a == xy || ln.b == xy
}
// lineWithLineIntersection represents the result of intersecting two line
// segments together. It can either be empty (flag set), a single point (both
// points set the same), or a line segment (defined by the two points).
type lineWithLineIntersection struct {
	empty    bool
	ptA, ptB XY
}
// intersectLine calculates the intersection between two line
// segments without performing any heap allocations.
func (ln line) intersectLine(other line) lineWithLineIntersection {
	a := ln.a
	b := ln.b
	c := other.a
	d := other.b
	// Orientation of each endpoint relative to the other segment.
	o1 := orientation(a, b, c)
	o2 := orientation(a, b, d)
	o3 := orientation(c, d, a)
	o4 := orientation(c, d, b)
	// The segments cross (or touch) when the endpoints of each segment lie
	// on opposite sides of the other.
	if o1 != o2 && o3 != o4 {
		// If any endpoint is collinear with the other segment, the
		// intersection is exactly that endpoint.
		if o1 == collinear {
			return lineWithLineIntersection{false, c, c}
		}
		if o2 == collinear {
			return lineWithLineIntersection{false, d, d}
		}
		if o3 == collinear {
			return lineWithLineIntersection{false, a, a}
		}
		if o4 == collinear {
			return lineWithLineIntersection{false, b, b}
		}
		// Proper crossing: solve for the parameter p along a->b where the
		// two infinite lines meet.
		e := (c.Y-d.Y)*(a.X-c.X) + (d.X-c.X)*(a.Y-c.Y)
		f := (d.X-c.X)*(a.Y-b.Y) - (a.X-b.X)*(d.Y-c.Y)
		// Division by zero is not possible, since the lines are not parallel.
		p := e / f
		pt := b.Sub(a).Scale(p).Add(a)
		return lineWithLineIntersection{false, pt, pt}
	}
	// Both endpoints of the other segment are collinear with this one, so
	// the segments overlap in a point or sub-segment (or not at all).
	if o1 == collinear && o2 == collinear {
		if (!onSegment(a, b, c) && !onSegment(a, b, d)) && (!onSegment(c, d, a) && !onSegment(c, d, b)) {
			return lineWithLineIntersection{empty: true}
		}
		// ---------------------
		// This block is to remove the collinear points in between the two
		// endpoints: dropping the extreme point at each end of the four
		// collinear candidates leaves the two endpoints of the overlap.
		pts := make([]XY, 0, 4)
		pts = append(pts, a, b, c, d)
		rth := rightmostThenHighestIndex(pts)
		pts = append(pts[:rth], pts[rth+1:]...)
		ltl := leftmostThenLowestIndex(pts)
		pts = append(pts[:ltl], pts[ltl+1:]...)
		// pts[0] and pts[1] _may_ be coincident, but that's ok.
		return lineWithLineIntersection{false, pts[0], pts[1]}
		//----------------------
	}
	return lineWithLineIntersection{empty: true}
}
// onSegment checks if point r lies on the segment formed by p and q.
// p, q and r must already be known to be collinear; only a bounding-box
// containment test is performed.
func onSegment(p XY, q XY, r XY) bool {
	return r.X <= fastMax(p.X, q.X) &&
		r.X >= fastMin(p.X, q.X) &&
		r.Y <= fastMax(p.Y, q.Y) &&
		r.Y >= fastMin(p.Y, q.Y)
}
// rightmostThenHighestIndex returns the index of the point with the largest
// X coordinate, breaking ties by the largest Y coordinate.
func rightmostThenHighestIndex(ps []XY) int {
	best := 0
	for i := 1; i < len(ps); i++ {
		p, q := ps[i], ps[best]
		if p.X > q.X || (p.X == q.X && p.Y > q.Y) {
			best = i
		}
	}
	return best
}
// leftmostThenLowestIndex finds the index of the leftmost-then-lowest point.
func leftmostThenLowestIndex(ps []XY) int {
rpi := 0
for i := 1; i < len(ps); i++ {
if ps[i].X < ps[rpi].X ||
(ps[i].X == ps[rpi].X &&
ps[i].Y < ps[rpi].Y) {
rpi = i
}
}
return rpi
} | geom/line.go | 0.77907 | 0.502625 | line.go | starcoder |
package jsoncs
import (
"bytes"
)
// Valid reports whether the JSON input is in canonical form.
// Invalid JSON input is reported as false.
func Valid(b []byte) bool {
	rest, ok := validValue(b)
	if !ok {
		return false
	}
	// The whole input must be consumed by exactly one value.
	return len(rest) == 0
}
// validValue reports whether the next JSON value is in its canonical form.
// It consumes the leading value and returns the remaining bytes.
func validValue(b []byte) ([]byte, bool) {
	if len(b) == 0 {
		return b, false
	}
	switch {
	case b[0] == '{':
		return validObject(b)
	case b[0] == '[':
		return validArray(b)
	case b[0] == '"':
		return validString(b)
	case b[0] == '-' || ('0' <= b[0] && b[0] <= '9'):
		return validNumber(b)
	case bytes.HasPrefix(b, nullLiteral):
		return b[len(nullLiteral):], true
	case bytes.HasPrefix(b, trueLiteral):
		return b[len(trueLiteral):], true
	case bytes.HasPrefix(b, falseLiteral):
		return b[len(falseLiteral):], true
	default:
		return b, false
	}
}
// validObject reports whether the next JSON object is in its canonical form
// per RFC 8785, section 3.2.3, which requires member names to be sorted in
// ascending order of their UTF-16 code units.
// It consumes the leading object and returns the remaining bytes.
func validObject(b []byte) ([]byte, bool) {
	if len(b) == 0 || b[0] != '{' {
		return b, false
	}
	b = b[1:]
	var init, ok bool
	var prevKey string
	for {
		// A closing brace terminates the object (handles the empty object too).
		if len(b) > 0 && b[0] == '}' {
			return b[1:], true
		}
		// Every member after the first must be preceded by a comma.
		if init {
			if len(b) == 0 || b[0] != ',' {
				return b, false
			}
			b = b[1:]
		}
		// Decode the member name for the ordering check, then validate its
		// canonical encoding.
		currKey, _, _ := decodeString(b)
		b, ok = validString(b)
		if !ok {
			return b, ok
		}
		// Names must be strictly increasing in UTF-16 order.
		// NOTE(review): this returns ok (true at this point) rather than
		// false; callers always reject the remainder because it begins with
		// ':', but an explicit false would be clearer — confirm intent.
		if init && !lessUTF16(prevKey, currKey) {
			return b, ok
		}
		prevKey = currKey
		if len(b) == 0 || b[0] != ':' {
			return b, false
		}
		b = b[1:]
		b, ok = validValue(b)
		if !ok {
			return b, ok
		}
		init = true
	}
}
// validArray reports whether the next JSON array is in its canonical form.
// It consumes the leading array and returns the remaining bytes.
func validArray(b []byte) ([]byte, bool) {
	if len(b) == 0 || b[0] != '[' {
		return b, false
	}
	b = b[1:]
	var init, ok bool
	for {
		// A closing bracket terminates the array (handles the empty array too).
		if len(b) > 0 && b[0] == ']' {
			return b[1:], true
		}
		// Every element after the first must be preceded by a comma.
		if init {
			if len(b) == 0 || b[0] != ',' {
				return b, false
			}
			b = b[1:]
		}
		b, ok = validValue(b)
		if !ok {
			return b, ok
		}
		init = true
	}
}
// validString reports whether the next JSON string is in its canonical form
// per RFC 8785, section 3.2.2.2.
// It consumes the leading string and returns the remaining bytes.
func validString(b []byte) ([]byte, bool) {
	if len(b) == 0 || b[0] != '"' {
		return b, false
	}
	// Fast-path optimization for unescaped ASCII.
	for b := b[1:]; len(b) > 0; b = b[1:] {
		if b[0] == '"' {
			return b[1:], true
		}
		if !(0x20 <= b[0] && b[0] < 0x80 && b[0] != '"' && b[0] != '\\') {
			break
		}
	}
	// Slow path: decode the string, re-encode it canonically, and require
	// the consumed bytes to match the canonical encoding exactly.
	s, b2, err := decodeString(b)
	got := b[:len(b)-len(b2)]
	want, _ := formatString(nil, s)
	return b2, bytes.Equal(got, want) && err == nil
}
// validNumber reports whether the next JSON number is in its canonical form
// per RFC 8785, section 3.2.2.3.
// It consume the leading value, and returns the remaining bytes.
func validNumber(b []byte) ([]byte, bool) {
if len(b) == 0 || !(b[0] == '-' || ('0' <= b[0] && b[0] <= '9')) {
return b, false
}
// Fast-path optimization for integers.
// Integer values in the range of ±2⁵³ are represented in decimal,
// which is encoded using up to 16 digits (excluding the sign).
{
b := b
var neg bool
if len(b) > 0 && b[0] == '-' {
b = b[1:]
neg = true
}
switch {
case len(b) == 0:
break
case b[0] == '0':
b = b[1:]
if neg {
break // -0 is not permitted
}
if len(b) > 0 && (b[0] == '.' || b[0] == 'e' || b[0] == 'E') {
break // number is not yet terminated
}
return b, true
case '1' <= b[0] && b[0] <= '9':
var n int
b = b[1:]
n++
for len(b) > 0 && ('0' <= b[0] && b[0] <= '9') {
b = b[1:]
n++
}
if n >= 16 {
break // possibly exceeds ±2⁵³
}
if len(b) > 0 && (b[0] == '.' || b[0] == 'e' || b[0] == 'E') {
break // number is not yet terminated
}
return b, true
}
}
f, b2, err := decodeNumber(b)
got := b[:len(b)-len(b2)]
want, _ := formatNumber(nil, f)
return b2, bytes.Equal(got, want) && err == nil
} | jsoncs/valid.go | 0.725454 | 0.422624 | valid.go | starcoder |
// Package changepoint implements algorithms for changepoint detection.
package changepoint
import "math"
// costFunc calculates the cost of the (tau1; tau2] segment.
// Remember that tau are one-based indexes.
type costFunc func(tau1 int, tau2 int) float64
// pelt locates changepoints in data using the PELT (Pruned Exact Linear
// Time) algorithm (see [Killick2012], referenced below). minSegment is the
// minimum allowed segment length, cost evaluates a candidate segment, and
// penalty is the cost added for each introduced changepoint. The returned
// indexes are sorted in ascending order.
func pelt(data []float64, minSegment int, cost costFunc, penalty float64) []int {
	n := len(data)

	// We will use dynamic programming to find the best solution; `bestCost` is the cost array.
	// `bestCost[i]` is the cost for subarray `data[0..i-1]`.
	// It's a 1-based array (`data[0]`..`data[n-1]` correspond to `bestCost[1]`..`bestCost[n]`)
	bestCost := make([]float64, n+1)
	bestCost[0] = -penalty
	for curTau := minSegment; curTau < 2*minSegment; curTau++ {
		bestCost[curTau] = cost(0, curTau)
	}

	// `prevChangepointIndex` is an array of references to previous changepoints. If the current segment ends at
	// the position `i`, the previous segment ends at the position `prevChangepointIndex[i]`. It's a 1-based
	// array (`data[0]`..`data[n-1]` correspond to the `prevChangepointIndex[1]`..`prevChangepointIndex[n]`)
	prevChangepointIndex := make([]int, n+1)

	// We use PELT (Pruned Exact Linear Time) approach which means that instead of enumerating all possible previous
	// tau values, we use a whitelist of "good" tau values that can be used in the optimal solution. If we are 100%
	// sure that some of the tau values will not help us to form the optimal solution, such values should be
	// removed. See [Killick2012] for details.
	prevTaus := make([]int, n+1) // The maximum number of the previous tau values is n + 1
	prevTaus[0] = 0
	prevTaus[1] = minSegment
	costForPrevTau := make([]float64, n+1)
	prevTausCount := 2 // The counter of previous tau values. Defines the size of `prevTaus` and `costForPrevTau`.

	// Following the dynamic programming approach, we enumerate all tau positions. For each `curTau`, we pretend
	// that it's the end of the last segment and trying to find the end of the previous segment.
	for curTau := 2 * minSegment; curTau < n+1; curTau++ {
		// For each previous tau, we should calculate the cost of taking this tau as the end of the previous
		// segment. This cost equals the cost for the `prevTau` plus cost of the new segment (from `prevTau`
		// to `curTau`) plus penalty for the new changepoint.
		for i, prevTau := range prevTaus[:prevTausCount] {
			costForPrevTau[i] = bestCost[prevTau] + cost(prevTau, curTau) + penalty
		}

		// Now we should choose the tau that provides the minimum possible cost.
		bestPrevTauIndex, curBestCost := whichMin(costForPrevTau[:prevTausCount])
		bestCost[curTau] = curBestCost
		prevChangepointIndex[curTau] = prevTaus[bestPrevTauIndex]

		// Prune phase: we remove "useless" tau values that will not help to achieve minimum cost in the future
		newPrevTausCount := 0
		for i, prevTauCost := range costForPrevTau[:prevTausCount] {
			if prevTauCost < curBestCost+penalty {
				prevTaus[newPrevTausCount] = prevTaus[i]
				newPrevTausCount++
			}
		}

		// We add a new tau value that is located on the `minSegment` distance from the next `curTau` value
		prevTaus[newPrevTausCount] = curTau - minSegment + 1
		prevTausCount = newPrevTausCount + 1
	}

	// Here we collect the result list of changepoint indexes `changepoints` using `prevChangepointIndex`
	var changepoints []int
	// The index of the end of the last segment is `n`
	for i := prevChangepointIndex[n]; i != 0; i = prevChangepointIndex[i] {
		changepoints = append(changepoints, i) // 1-based index of the end of segment is equal to 0-based index of the beginning of next segment
	}

	// The result changepoints should be sorted in ascending order.
	for l, r := 0, len(changepoints)-1; l < r; l, r = l+1, r-1 {
		changepoints[l], changepoints[r] = changepoints[r], changepoints[l]
	}

	return changepoints
}
// whichMin returns the index and value of the smallest element in data.
// If data is empty it returns -1 and +Inf.
// (Fix: stray dataset-metadata text fused onto the closing brace has been
// removed; it made the file invalid Go.)
func whichMin(data []float64) (int, float64) {
	ix, min := -1, math.Inf(1)
	for i, v := range data {
		if v < min {
			ix, min = i, v
		}
	}
	return ix, min
}
package graph
import "fmt"
// Node is a node in a graph: it exposes its identity, incident edges, and
// traversal helpers. The unexported methods are bookkeeping hooks used by
// the graph implementation itself.
type Node interface {
	ID() int
	Edges() []Edge
	Degree() int
	Neighbors(EdgeFilter) []Node
	Hops(EdgeFilter) []*Hop
	add(Edge)
	drop(Edge)
	dropAll()
	index() int
	setIndex(int)
	setID(int)
}

// Compile-time check that *node satisfies the Node interface.
var _ Node = (*node)(nil)

// NodeFilter is a function type used for assessment of nodes during graph traversal.
type NodeFilter func(Node) bool

// node is the concrete implementation of the Node interface.
type node struct {
	id    int   // the node's identifier
	i     int   // the node's position within its containing Nodes slice (see setIndex/index)
	edges Edges // edges incident on this node
}

// newNode creates a new *node with ID id. Nodes should only ever exist in the context of a
// graph, so this is not a public function.
func newNode(id int) Node {
	return &node{
		id: id,
	}
}

// A Hop is an edge/node pair where the edge leads to the node from a neighbor.
type Hop struct {
	Edge Edge
	Node Node
}
// ID returns the id of a node.
func (n *node) ID() int {
	return n.id
}

// Edges returns a slice of edges that are incident on the node.
// It returns nil rather than an empty slice when there are none.
func (n *node) Edges() []Edge {
	if len(n.edges) == 0 {
		return nil
	}
	return n.edges
}

// Degree returns the number of incident edges on a node. Looped edges are counted at both ends.
func (n *node) Degree() int {
	l := 0
	for _, e := range n.edges {
		if e.Head() == e.Tail() {
			l++ // a self-loop contributes an extra count so it is counted twice overall
		}
	}
	return l + len(n.edges)
}
// Neighbors returns a slice of nodes that share an edge with the node. Multiply connected nodes are
// repeated in the slice. If the node is n-connected it will be included in the slice, potentially
// repeatedly if there are multiple n-connecting edges. If ef is nil all edges are included.
func (n *node) Neighbors(ef EdgeFilter) []Node {
	var nodes []Node
	for _, e := range n.edges {
		if ef == nil || ef(e) {
			// Append the endpoint of e that is not this node
			// (for self-loops, that is the node itself via Head).
			if a := e.Tail(); a.ID() == n.ID() {
				nodes = append(nodes, e.Head())
			} else {
				nodes = append(nodes, a)
			}
		}
	}
	return nodes
}

// Hops has essentially the same functionality as Neighbors with the exception that the connecting
// edge is also returned.
func (n *node) Hops(ef EdgeFilter) []*Hop {
	var h []*Hop
	for _, e := range n.edges {
		if ef == nil || ef(e) {
			if a := e.Tail(); a.ID() == n.ID() {
				h = append(h, &Hop{e, e.Head()})
			} else {
				h = append(h, &Hop{e, a})
			}
		}
	}
	return h
}
// add appends e to the node's incident-edge list.
func (n *node) add(e Edge) { n.edges = append(n.edges, e) }

// dropAll removes every incident edge, nilling the entries so the edges can
// be garbage collected before the slice is truncated.
func (n *node) dropAll() {
	for i := range n.edges {
		n.edges[i] = nil
	}
	n.edges = n.edges[:0]
}

// drop removes the first occurrence of e from the node's edge list.
func (n *node) drop(e Edge) {
	for i := 0; i < len(n.edges); {
		if n.edges[i] == e {
			n.edges = n.edges.delFromNode(i)
			break // assumes e has not been added more than once - this should not happen, but we don't check for it
		} else {
			i++
		}
	}
}

func (n *node) setID(id int)   { n.id = id }
func (n *node) setIndex(i int) { n.i = i }
func (n *node) index() int     { return n.i }

// String implements the fmt.Stringer interface.
func (n *node) String() string {
	return fmt.Sprintf("%d:%v", n.id, n.edges)
}
// Nodes is a collection of nodes.
type Nodes []Node

// BuildUndirected creates a new Undirected graph using nodes and edges specified by the
// set of nodes in the receiver. If edges of nodes in the receiver connect to nodes that are not, these extra nodes
// will be included in the resulting graph. If compact is set to true, edge IDs are chosen to minimize
// space consumption, but breaking edge ID consistency between the new graph and the original.
func (ns Nodes) BuildUndirected(compact bool) (*Undirected, error) {
	// seen tracks edges already copied, so an edge shared by two receiver
	// nodes is added to the new graph only once.
	seen := make(map[Edge]struct{})
	g := NewUndirected()
	for _, n := range ns {
		g.AddID(n.ID())
		for _, e := range n.Edges() {
			if _, ok := seen[e]; ok {
				continue
			}
			seen[e] = struct{}{}
			u, v := e.Nodes()
			uid, vid := u.ID(), v.ID()
			if uid < 0 || vid < 0 {
				return nil, NodeIDOutOfRange
			}
			g.AddID(uid)
			g.AddID(vid)
			var ne Edge
			if compact {
				ne = g.newEdge(g.nodes[uid], g.nodes[vid])
			} else {
				ne = g.newEdgeKeepID(e.ID(), g.nodes[uid], g.nodes[vid])
			}
			g.nodes[uid].add(ne)
			// For a self-loop only add the edge to the node once.
			if vid != uid {
				g.nodes[vid].add(ne)
			}
		}
	}

	return g, nil
}
func (ns Nodes) delFromGraph(i int) Nodes {
ns[i], ns[len(ns)-1] = ns[len(ns)-1], ns[i]
ns[i].setIndex(i)
ns[len(ns)-1].setIndex(-1)
return ns[:len(ns)-1]
} | node.go | 0.694303 | 0.443359 | node.go | starcoder |
// Package grafestes contains the consonant vowel names and timing information for the sound sequences used for the
// research reported in "Listening Through Voices: Infant Statistical Word Segmentation Across Multiple Speakers",
// <NAME> & Lew-Williams, 2015.
// The sounds are spoken consonant-vowels that were spliced together from eight (?) women.
// See the paper for the details on how the sequences were contructed
package grafestes
import (
"bufio"
"log"
"os"
"strconv"
"strings"
"github.com/emer/auditory/speech"
)
// CVs lists the consonant-vowel syllables used in the experiment's sound sequences.
var CVs = []string{"ti", "do", "ga", "mo", "may", "bu", "pi", "ku"}
var CVsPerWord = 2 // The graf-estes experiment used 2 syllable words
var CVsPerPos = 4  // The graf-estes experiment had 4 cv possibilities per syllable position
// LoadTranscription reads in a list of cv strings for decoding a particular sequence and returns a slice of strings
func LoadTranscription(fn string) ([]string, error) {
//fmt.Println
var names []string
fp2, err := os.Open(fn)
if err != nil {
log.Println(err)
return names, err
}
defer fp2.Close() // we will be done with the file within this function
scanner2 := bufio.NewScanner(fp2)
scanner2.Split(bufio.ScanLines)
s := ""
for scanner2.Scan() {
s = scanner2.Text()
}
names = strings.Split(s, " ")
return names, nil
}
// LoadTimes loads the timing and sequence (transcription) data for CV files.
// fn is the timing file (one "start end" pair per line, times in seconds);
// names supplies the CV name for each line in order. Times are converted to
// milliseconds. Reading stops once len(names) units have been collected.
func LoadTimes(fn string, names []string) ([]speech.SpeechUnit, error) {
	var units []speech.SpeechUnit
	fp, err := os.Open(fn)
	if err != nil {
		log.Println(err)
		log.Println("Make sure you have the sound files rsyncd to your ccn_images directory and a link (ln -s) to ccn_images in your sim working directory")
		return units, err
	}
	defer fp.Close() // we will be done with the file within this function

	scanner := bufio.NewScanner(fp)
	scanner.Split(bufio.ScanLines)
	i := 0
	for scanner.Scan() {
		t := scanner.Text()
		if t == "" {
			break
		} else if strings.HasPrefix(t, "\\") { // lines starting with '\' carry frequency data for start/end points; skip them
			continue
		}
		cvt := new(speech.SpeechUnit)
		units = append(units, *cvt)
		cvs := strings.Fields(t)
		f, err := strconv.ParseFloat(cvs[0], 64)
		if err == nil {
			(units)[i].Start = f * 1000 // convert to milliseconds
		}
		f, err = strconv.ParseFloat(cvs[1], 64)
		if err == nil {
			(units)[i].End = f * 1000 // convert to milliseconds
		}
		(units)[i].Name = names[i]
		i++
		if i == len(names) {
			return units, nil
		} // handles case where there may be lines after last line of start, end, name
	}
	return units, nil
}
// IdxFmSnd returns the slice index of the snd if found.
// id is ignored if the corpus doesn't have subsets of sounds
func IdxFmSnd(s string, id string) (int, bool) {
	for i := range CVs {
		if CVs[i] == s {
			return i, true
		}
	}
	return -1, false
}
// SndFmIdx returns the sound if found in the slice of sounds of the corpus.
// id is ignored if the corpus doesn't have subsets of sounds
func SndFmIdx(idx int, id string) (cv string, ok bool) {
cv = ""
ok = false
if idx >= 0 && idx < len(CVs) {
cv = CVs[idx]
ok = true
return
}
return
} | speech/grafestes/grafestes.go | 0.688783 | 0.473596 | grafestes.go | starcoder |
package hangul
// IsLead reports whether r is a Hangul lead (initial) consonant jamo.
func IsLead(r rune) bool {
	return LEAD_G <= r && r <= LEAD_H
}

// IsMedial reports whether r is a Hangul medial vowel jamo.
func IsMedial(r rune) bool {
	return MEDIAL_A <= r && r <= MEDIAL_I
}

// IsTail reports whether r is a Hangul tail (final) consonant jamo.
func IsTail(r rune) bool {
	return TAIL_G <= r && r <= TAIL_H
}

// IsJaeum reports whether r is a Hangul jaeum (consonant) of any form:
// compatibility jamo, lead consonant, or tail consonant.
func IsJaeum(r rune) bool {
	return (G <= r && r <= H) || IsLead(r) || IsTail(r)
}

// IsMoeum reports whether r is a Hangul moeum (vowel) of any form:
// compatibility jamo or medial vowel.
func IsMoeum(r rune) bool {
	return (A <= r && r <= I) || IsMedial(r)
}
// multiElements maps a compound compatibility jamo to the sequence of
// simple jamo it is composed of (e.g. GG -> G,G; WAE -> O,A,I).
var multiElements = map[rune][]rune{
	GG:  []rune{G, G},
	GS:  []rune{G, S},
	NJ:  []rune{N, J},
	NH:  []rune{N, H},
	DD:  []rune{D, D},
	LG:  []rune{L, G},
	LM:  []rune{L, M},
	LB:  []rune{L, B},
	LS:  []rune{L, S},
	LT:  []rune{L, T},
	LP:  []rune{L, P},
	LH:  []rune{L, H},
	BB:  []rune{B, B},
	BS:  []rune{B, S},
	SS:  []rune{S, S},
	JJ:  []rune{J, J},
	AE:  []rune{A, I},
	E:   []rune{EO, I},
	YAE: []rune{YA, I},
	YE:  []rune{YEO, I},
	WA:  []rune{O, A},
	WAE: []rune{O, A, I},
	OE:  []rune{O, I},
	WEO: []rune{U, EO},
	WE:  []rune{U, E},
	WI:  []rune{U, I},
	YI:  []rune{EU, I},
}

// SplitMultiElement splits a multi-element compatibility jamo into its
// simple elements. r is first normalized via CompatJamo; ok reports whether
// r is a compound jamo.
func SplitMultiElement(r rune) ([]rune, bool) {
	r = CompatJamo(r)
	es, ok := multiElements[r]
	return es, ok
}
// toCompatJamo maps lead/tail consonant jamo to the corresponding
// compatibility jamo code point.
var toCompatJamo = map[rune]rune{
	LEAD_G:  G,
	TAIL_G:  G,
	LEAD_GG: GG,
	TAIL_GG: GG,
	TAIL_GS: GS,
	LEAD_N:  N,
	TAIL_N:  N,
	TAIL_NJ: NJ,
	TAIL_NH: NH,
	LEAD_D:  D,
	TAIL_D:  D,
	LEAD_DD: DD,
	LEAD_R:  L,
	TAIL_L:  L,
	TAIL_LG: LG,
	TAIL_LM: LM,
	TAIL_LB: LB,
	TAIL_LS: LS,
	TAIL_LT: LT,
	TAIL_LP: LP,
	TAIL_LH: LH,
	LEAD_M:  M,
	TAIL_M:  M,
	LEAD_B:  B,
	TAIL_B:  B,
	LEAD_BB: BB,
	TAIL_BS: BS,
	LEAD_S:  S,
	TAIL_S:  S,
	LEAD_SS: SS,
	TAIL_SS: SS,
	LEAD_ZS: ZS,
	TAIL_NG: ZS,
	LEAD_J:  J,
	TAIL_J:  J,
	LEAD_JJ: JJ,
	LEAD_C:  C,
	TAIL_C:  C,
	LEAD_K:  K,
	TAIL_K:  K,
	LEAD_T:  T,
	TAIL_T:  T,
	LEAD_P:  P,
	TAIL_P:  P,
	LEAD_H:  H,
	TAIL_H:  H,
}

// CompatJamo converts a lead consonant, medial vowel, or tail consonant to
// the corresponding compatibility jamo. Runes that are already compatibility
// jamo are returned unchanged; unknown runes yield 0.
func CompatJamo(r rune) rune {
	switch {
	case G <= r && r <= H:
		return r
	case A <= r && r <= I:
		return r
	case MEDIAL_A <= r && r <= MEDIAL_I:
		// Shift from the medial-vowel block into the compatibility block.
		return r - MEDIAL_BASE + A
	}
	if c, ok := toCompatJamo[r]; ok {
		return c
	}
	return 0
}
// toLead maps a compatibility jaeum to the corresponding lead consonant
// code point.
var toLead = map[rune]rune{
	G:  LEAD_G,
	GG: LEAD_GG,
	N:  LEAD_N,
	D:  LEAD_D,
	DD: LEAD_DD,
	L:  LEAD_R,
	M:  LEAD_M,
	B:  LEAD_B,
	BB: LEAD_BB,
	S:  LEAD_S,
	SS: LEAD_SS,
	ZS: LEAD_ZS,
	J:  LEAD_J,
	JJ: LEAD_JJ,
	C:  LEAD_C,
	K:  LEAD_K,
	T:  LEAD_T,
	P:  LEAD_P,
	H:  LEAD_H,
}

// Lead converts a compatibility jaeum to the corresponding lead consonant.
// Runes already in the lead block are returned unchanged; runes with no
// lead form yield 0.
func Lead(c rune) rune {
	if LEAD_G <= c && c <= LEAD_H {
		return c
	}
	if l, ok := toLead[c]; ok {
		return l
	}
	return 0
}

// Medial converts a compatibility moeum to the corresponding medial vowel.
// Runes already in the medial block are returned unchanged; other runes
// yield 0.
func Medial(c rune) rune {
	switch {
	case MEDIAL_A <= c && c <= MEDIAL_I:
		return c
	case A <= c && c <= I:
		return c - A + MEDIAL_BASE
	}
	return 0
}
// toTail maps a compatibility jaeum to the corresponding tail consonant
// code point.
var toTail = map[rune]rune{
	G:  TAIL_G,
	GG: TAIL_GG,
	GS: TAIL_GS,
	N:  TAIL_N,
	NJ: TAIL_NJ,
	NH: TAIL_NH,
	D:  TAIL_D,
	L:  TAIL_L,
	LG: TAIL_LG,
	LM: TAIL_LM,
	LB: TAIL_LB,
	LS: TAIL_LS,
	LT: TAIL_LT,
	LP: TAIL_LP,
	LH: TAIL_LH,
	M:  TAIL_M,
	B:  TAIL_B,
	BS: TAIL_BS,
	S:  TAIL_S,
	SS: TAIL_SS,
	ZS: TAIL_NG,
	J:  TAIL_J,
	C:  TAIL_C,
	K:  TAIL_K,
	T:  TAIL_T,
	P:  TAIL_P,
	H:  TAIL_H,
}

// Tail converts a compatibility jaeum to the corresponding tail consonant.
// Runes already in the tail block are returned unchanged; runes with no
// tail form yield 0.
func Tail(c rune) rune {
	if TAIL_G <= c && c <= TAIL_H {
		return c
	}
	if t, ok := toTail[c]; ok {
		return t
	}
	return 0
}
// leadIdx maps a lead consonant rune to its syllable-composition index,
// reporting false when l lies outside the lead jamo range.
func leadIdx(l rune) (int, bool) {
	i := int(l) - LEAD_BASE
	if i < 0 || i > MAX_LEAD_IDX {
		return 0, false
	}
	return i, true
}

// medialIdx maps a medial vowel rune to its syllable-composition index,
// reporting false when v lies outside the medial jamo range.
func medialIdx(v rune) (int, bool) {
	i := int(v) - MEDIAL_BASE
	if i < 0 || i > MAX_MEDIAL_IDX {
		return 0, false
	}
	return i, true
}
func tailIdx(t rune) (int, bool) {
if t == 0 {
// A hangul syllable can have no tail consonent.
return 0, true
}
i := int(t) - TAIL_BASE
if 0 > i || i > MAX_TAIL_IDX {
return 0, false
}
return i + 1, true
} | jamo.go | 0.613584 | 0.477311 | jamo.go | starcoder |
package rewrite
import (
"github.com/jonlawlor/matrixexp"
)
// AnyExp represents any matrix expression. It is intended for use with the
// Template rewriter. It implements matrix algebra but if it is ever used for
// calculations it will cause a runtime panic.
// Note that this is an int so that it actually takes up memory - this is
// intentional. Go puts zero sized structs into the same address, which is no
// good if we want to compare their memory location to determine identity for
// wildcard matching.
type AnyExp int
// String implements the Stringer interface.
func (m1 *AnyExp) String() string {
return "Any"
}
// Dims returns the matrix dimensions.
func (m1 *AnyExp) Dims() (r, c int) {
return 0, 0
}
// At returns the value at a given row, column index.
func (m1 *AnyExp) At(r, c int) float64 {
panic("cannot evaluate an AnyExpr")
}
// Eval returns a matrix literal.
func (m1 *AnyExp) Eval() matrixexp.MatrixLiteral {
panic("cannot evaluate an AnyExpr")
}
// Copy normally creates a (deep) copy of the Matrix Expression. However, to
// aid in rewriting expressions, Copy() of matrix expression wildcards is a nop.
func (m1 *AnyExp) Copy() matrixexp.MatrixExp {
// Copy of AnyExp does not produce a new value.
return m1
}
// Err returns the first error encountered while constructing the matrix expression.
func (m1 *AnyExp) Err() error {
panic("cannot evaluate an AnyExpr")
}
// T transposes a matrix.
func (m1 *AnyExp) T() matrixexp.MatrixExp {
return &matrixexp.T{m1}
}
// Add two matrices together.
func (m1 *AnyExp) Add(m2 matrixexp.MatrixExp) matrixexp.MatrixExp {
return &matrixexp.Add{
Left: m1,
Right: m2,
}
}
// Sub subtracts the right matrix from the left matrix.
func (m1 *AnyExp) Sub(m2 matrixexp.MatrixExp) matrixexp.MatrixExp {
return &matrixexp.Sub{
Left: m1,
Right: m2,
}
}
// Scale performs scalar multiplication.
func (m1 *AnyExp) Scale(c float64) matrixexp.MatrixExp {
return &matrixexp.Scale{
C: c,
M: m1,
}
}
// Mul performs matrix multiplication.
func (m1 *AnyExp) Mul(m2 matrixexp.MatrixExp) matrixexp.MatrixExp {
return &matrixexp.Mul{
Left: m1,
Right: m2,
}
}
// MulElem performs element-wise multiplication.
func (m1 *AnyExp) MulElem(m2 matrixexp.MatrixExp) matrixexp.MatrixExp {
return &matrixexp.MulElem{
Left: m1,
Right: m2,
}
}
// DivElem performs element-wise division.
func (m1 *AnyExp) DivElem(m2 matrixexp.MatrixExp) matrixexp.MatrixExp {
return &matrixexp.DivElem{
Left: m1,
Right: m2,
}
}
// Match determines if a matrix expression wildcard matches another matrix
// expression.
func (m1 *AnyExp) Match(m2 matrixexp.MatrixExp) error {
// AnyExp matches all other expressions.
return nil
} | rewrite/expmatch.go | 0.893733 | 0.549761 | expmatch.go | starcoder |
// SparseMax implementation based on https://github.com/gokceneraslan/SparseMax.torch
package fn
import (
"github.com/nlpodyssey/spago/pkg/mat"
"github.com/nlpodyssey/spago/pkg/mat/f64utils"
"math"
"sort"
)
// SparseMax implements the sparsemax activation over an operand: a
// softmax-like projection that can produce exact zeros (see the
// package-level reference implementation link).
type SparseMax struct {
	x Operand
	y mat.Matrix // initialized during the forward pass, required by the backward pass
}

// Compile-time checks that both functions satisfy the Function interface.
var _ Function = &SparseMax{}
var _ Function = &SparseMaxLoss{}

// NewSparseMax returns a new SparseMax function over x.
func NewSparseMax(x Operand) *SparseMax {
	return &SparseMax{x: x}
}

// Forward computes sparsemax(x) as a column vector and caches it for Backward.
func (s *SparseMax) Forward() mat.Matrix {
	s.y = mat.NewVecDense(sparseMax(translateInput(s.x.Value().Data())))
	return s.y
}
// Backward propagates gradients through the sparsemax support: entries with
// non-zero output receive gy minus the mean of gy over the support, and
// entries outside the support receive zero gradient.
func (s *SparseMax) Backward(gy mat.Matrix) {
	if s.x.RequiresGrad() {
		output := s.y.Data()
		nzSum := 0.0   // sum of incoming gradients over the support
		nzCount := 0.0 // number of non-zero output entries (support size)
		gx := mat.GetDenseWorkspace(s.x.Value().Rows(), s.x.Value().Columns())
		defer mat.ReleaseDense(gx)
		for i := range output {
			if output[i] != 0 {
				nzSum += gy.At(i, 0)
				nzCount += 1
			}
		}
		// NOTE(review): if the support were empty this division yields NaN;
		// presumably sparsemax always keeps at least one non-zero entry — confirm.
		nzSum = nzSum / nzCount
		for i := range output {
			if output[i] != 0 {
				gx.Set(i, 0, gy.At(i, 0)-nzSum)
			} else {
				gx.Set(i, 0, 0)
			}
		}
		s.x.PropagateGrad(gx)
	}
}
// translateInput translates the input by max for numerical stability
func translateInput(v []float64) []float64 {
	maximum := max(v)
	translated := make([]float64, len(v))
	for i := range v {
		translated[i] = v[i] - maximum
	}
	return translated
}

// sparseMaxCommon computes the quantities shared by sparseMax and
// sparseMaxLoss: zs is v sorted in descending order, bounds[i] holds
// 1+(i+1)*zs[i], cumSumInput is the running (cumulative) sum of zs, and tau
// is the threshold defining the support of the sparsemax projection.
func sparseMaxCommon(v []float64) (zs []float64, bounds []float64, cumSumInput []float64, tau float64) {
	zs = make([]float64, len(v))
	copy(zs, v)

	// Sort zs in descending order.
	sort.Sort(sort.Reverse(sort.Float64Slice(zs)))

	bounds = make([]float64, len(zs))
	for i := range bounds {
		bounds[i] = 1 + float64(i+1)*zs[i]
	}

	cumSumInput = make([]float64, len(zs))
	f64utils.CumSum(cumSumInput, zs)

	// k is the largest index (1-based) still inside the support; tau
	// accumulates the sum of the in-support entries before normalization.
	k := -1
	tau = 0.0
	for i := range zs {
		if bounds[i] > cumSumInput[i] {
			if k < (i + 1) {
				k = i + 1
			}
			tau += zs[i]
		}
	}
	tau = (tau - 1) / float64(k)

	return zs, bounds, cumSumInput, tau
}

// sparseMax returns the sparsemax projection of v: max(0, v[i]-tau).
func sparseMax(v []float64) []float64 {
	zs, _, _, tau := sparseMaxCommon(v)

	//Reuses zs to avoid allocating new slice
	for i := range zs {
		zs[i] = math.Max(0.0, v[i]-tau)
	}

	return zs
}
// SparseMaxLoss implements the sparsemax loss over an operand.
type SparseMaxLoss struct {
	x   Operand
	tau float64    // computed during the forward pass
	y   mat.Matrix // computed during forward pass
}

// NewSparseMaxLoss returns a new SparseMaxLoss function over x.
func NewSparseMaxLoss(x Operand) *SparseMaxLoss {
	return &SparseMaxLoss{x: x}
}

// sparseMaxLoss computes the sparseMax loss function and returns
// the loss and the tau parameter (needed by backward)
func sparseMaxLoss(v []float64) ([]float64, float64) {
	zs, bounds, cumSumInput, tau := sparseMaxCommon(v)

	// regTerm is the regularization term 0.5*sum(z^2 - tau^2) + 0.5 over
	// the support (entries where bounds > cumSumInput).
	regTerm := 0.0
	tauSquared := tau * tau
	for i := range zs {
		if bounds[i] > cumSumInput[i] {
			regTerm += zs[i]*zs[i] - tauSquared
		}
	}
	regTerm = regTerm*0.5 + 0.5

	// Reuse zs to avoid allocating a new slice
	for i := range zs {
		zs[i] = v[i] - regTerm
	}
	return zs, tau
}

// Forward computes the loss vector, caching both the result and tau for
// the backward pass.
func (s *SparseMaxLoss) Forward() mat.Matrix {
	output, tau := sparseMaxLoss(s.x.Value().Data())
	s.y = mat.NewVecDense(output)
	s.tau = tau
	return s.y
}
func (s *SparseMaxLoss) Backward(gy mat.Matrix) {
if s.x.RequiresGrad() {
input := s.x.Value().Data()
sparseMax := make([]float64, len(input))
for i := range sparseMax {
sparseMax[i] = math.Max(0, input[i]-s.tau)
}
gx := mat.GetDenseWorkspace(s.x.Value().Rows(), s.x.Value().Columns())
defer mat.ReleaseDense(gx)
gyData := gy.Data()
gySum := f64utils.Sum(gyData)
for i := range gyData {
gx.Set(i, 0, gy.At(i, 0)-gySum*sparseMax[i])
}
s.x.PropagateGrad(gx)
}
} | pkg/ml/ag/fn/sparsemax.go | 0.754734 | 0.486758 | sparsemax.go | starcoder |
package discovery
import "fmt"
// Switch describes an MQTT switch entity for Home Assistant discovery; each
// field mirrors one key of the discovery JSON payload.
type Switch struct {
	// A list of MQTT topics subscribed to receive availability (online/offline) updates. Must not be used together with `availability_topic`
	// Default: <no value>
	Availability []Availability `json:"availability,omitempty"`
	// When `availability` is configured, this controls the conditions needed to set the entity to `available`. Valid entries are `all`, `any`, and `latest`. If set to `all`, `payload_available` must be received on all configured availability topics before the entity is marked as online. If set to `any`, `payload_available` must be received on at least one configured availability topic before the entity is marked as online. If set to `latest`, the last `payload_available` or `payload_not_available` received on any configured availability topic controls the availability
	// Default: latest
	AvailabilityMode string `json:"availability_mode,omitempty"`
	// Defines a [template](/docs/configuration/templating/#processing-incoming-data) to extract device's availability from the `availability_topic`. To determine the devices's availability result of this template will be compared to `payload_available` and `payload_not_available`
	// Default: <no value>
	AvailabilityTemplate string `json:"availability_template,omitempty"`
	// The MQTT topic subscribed to receive availability (online/offline) updates. Must not be used together with `availability`
	// Default: <no value>
	AvailabilityTopic string `json:"availability_topic,omitempty"`
	// The MQTT topic to publish commands to change the switch state
	// Default: <no value>
	CommandTopic string `json:"command_topic,omitempty"`
	// Information about the device this switch is a part of to tie it into the [device registry](https://developers.home-assistant.io/docs/en/device_registry_index.html). Only works through [MQTT discovery](/docs/mqtt/discovery/) and when [`unique_id`](#unique_id) is set. At least one of identifiers or connections must be present to identify the device
	// Default: <no value>
	Device *Device `json:"device,omitempty"`
	// The [type/class](/integrations/switch/#device-class) of the switch to set the icon in the frontend
	// Default: None
	DeviceClass string `json:"device_class,omitempty"`
	// Flag which defines if the entity should be enabled when first added
	// Default: true
	EnabledByDefault bool `json:"enabled_by_default,omitempty"`
	// The [category](https://developers.home-assistant.io/docs/core/entity#generic-properties) of the entity
	// Default: None
	EntityCategory string `json:"entity_category,omitempty"`
	// [Icon](/docs/configuration/customizing-devices/#icon) for the entity
	// Default: <no value>
	Icon string `json:"icon,omitempty"`
	// Defines a [template](/docs/configuration/templating/#processing-incoming-data) to extract the JSON dictionary from messages received on the `json_attributes_topic`. Usage example can be found in [MQTT sensor](/integrations/sensor.mqtt/#json-attributes-template-configuration) documentation
	// Default: <no value>
	JsonAttributesTemplate string `json:"json_attributes_template,omitempty"`
	// The MQTT topic subscribed to receive a JSON dictionary payload and then set as sensor attributes. Usage example can be found in [MQTT sensor](/integrations/sensor.mqtt/#json-attributes-topic-configuration) documentation
	// Default: <no value>
	JsonAttributesTopic string `json:"json_attributes_topic,omitempty"`
	// The name to use when displaying this switch
	// Default: MQTT Switch
	Name string `json:"name,omitempty"`
	// Used instead of `name` for automatic generation of `entity_id`
	// Default: <no value>
	ObjectId string `json:"object_id,omitempty"`
	// Flag that defines if switch works in optimistic mode
	// Default: `true` if no `state_topic` defined, else `false`.
	Optimistic bool `json:"optimistic,omitempty"`
	// The payload that represents the available state
	// Default: online
	PayloadAvailable string `json:"payload_available,omitempty"`
	// The payload that represents the unavailable state
	// Default: offline
	PayloadNotAvailable string `json:"payload_not_available,omitempty"`
	// The payload that represents `off` state. If specified, will be used for both comparing to the value in the `state_topic` (see `value_template` and `state_off` for details) and sending as `off` command to the `command_topic`
	// Default: OFF
	PayloadOff string `json:"payload_off,omitempty"`
	// The payload that represents `on` state. If specified, will be used for both comparing to the value in the `state_topic` (see `value_template` and `state_on` for details) and sending as `on` command to the `command_topic`
	// Default: ON
	PayloadOn string `json:"payload_on,omitempty"`
	// The maximum QoS level of the state topic. Default is 0 and will also be used to publishing messages
	// Default: 0
	Qos int `json:"qos,omitempty"`
	// If the published message should have the retain flag on or not
	// Default: false
	Retain bool `json:"retain,omitempty"`
	// The payload that represents the `off` state. Used when value that represents `off` state in the `state_topic` is different from value that should be sent to the `command_topic` to turn the device `off`
	// Default: `payload_off` if defined, else OFF
	StateOff string `json:"state_off,omitempty"`
	// The payload that represents the `on` state. Used when value that represents `on` state in the `state_topic` is different from value that should be sent to the `command_topic` to turn the device `on`
	// Default: `payload_on` if defined, else ON
	StateOn string `json:"state_on,omitempty"`
	// The MQTT topic subscribed to receive state updates
	// Default: <no value>
	StateTopic string `json:"state_topic,omitempty"`
	// An ID that uniquely identifies this switch device. If two switches have the same unique ID, Home Assistant will raise an exception
	// Default: <no value>
	UniqueId string `json:"unique_id,omitempty"`
	// Defines a [template](/docs/configuration/templating/#processing-incoming-data) to extract device's state from the `state_topic`. To determine the switches's state result of this template will be compared to `state_on` and `state_off`
	// Default: <no value>
	ValueTemplate string `json:"value_template,omitempty"`
}
// AnnounceTopic returns the topic to announce the discoverable Switch
// Topic has the format below:
// <discovery_prefix>/<component>/<object_id>/config
// 'object_id' is either the UniqueId, the Name, or a hash of the Switch
func (d *Switch) AnnounceTopic(prefix string) string {
topicFormat := "%s/switch/%s/config"
objectID := ""
switch {
case d.UniqueId != "":
objectID = d.UniqueId
case d.Name != "":
objectID = d.Name
default:
objectID = hash(d)
}
return fmt.Sprintf(topicFormat, prefix, objectID)
} | switch.go | 0.834272 | 0.417034 | switch.go | starcoder |
package types
import "sync"
// TSafeUints is a goroutine-safe (mutex-guarded) view over a Uints slice.
type TSafeUints interface {
	// Reset the slice.
	Reset()
	// Contains say if "s" contains "values".
	Contains(...uint) bool
	// ContainsOneOf says if "s" contains one of the "values".
	ContainsOneOf(...uint) bool
	// Copy create a new copy of the slice.
	Copy() TSafeUints
	// Diff returns the difference between "s" and "s2".
	Diff(Uints) Uints
	// Empty says if the slice is empty.
	Empty() bool
	// Equal says if "s" and "s2" are equal.
	Equal(Uints) bool
	// Find the first element matching the pattern.
	Find(func(v uint) bool) (uint, bool)
	// FindAll elements matching the pattern.
	FindAll(func(v uint) bool) Uints
	// First return the value of the first element.
	First() (uint, bool)
	// Get the element "i" and say if it has been found.
	Get(int) (uint, bool)
	// Intersect return the intersection between "s" and "s2".
	Intersect(Uints) Uints
	// Last return the value of the last element.
	Last() (uint, bool)
	// Len returns the size of the slice.
	Len() int
	// Take n element and return a new slice.
	Take(int) Uints
	// S convert s into []interface{}
	S() []interface{}
	// S convert s into Uints
	Uints() Uints
}

// SyncUints returns a new, empty thread-safe Uints wrapper.
func SyncUints() TSafeUints {
	return &tsafeUints{&sync.RWMutex{}, Uints{}}
}

// tsafeUints implements TSafeUints by guarding a plain Uints value with a
// read-write mutex.
type tsafeUints struct {
	mu     *sync.RWMutex
	values Uints
}
// The methods below implement TSafeUints by delegating to the underlying
// Uints value: mutating operations take the write lock, all others take the
// read lock.

func (s *tsafeUints) Reset() {
	s.mu.Lock()
	s.values.Reset()
	s.mu.Unlock()
}

func (s *tsafeUints) Contains(values ...uint) (ok bool) {
	s.mu.RLock()
	ok = s.values.Contains(values...)
	s.mu.RUnlock()
	return
}

func (s *tsafeUints) ContainsOneOf(values ...uint) (ok bool) {
	s.mu.RLock()
	ok = s.values.ContainsOneOf(values...)
	s.mu.RUnlock()
	return
}

// Copy returns an independent TSafeUints with its own mutex and a deep copy
// of the values.
func (s *tsafeUints) Copy() TSafeUints {
	s2 := &tsafeUints{mu: &sync.RWMutex{}}
	s.mu.RLock()
	s2.values = s.values.Copy()
	s.mu.RUnlock()
	return s2
}

func (s *tsafeUints) Diff(s2 Uints) (out Uints) {
	s.mu.RLock()
	out = s.values.Diff(s2)
	s.mu.RUnlock()
	return
}

func (s *tsafeUints) Empty() (ok bool) {
	s.mu.RLock()
	ok = s.values.Empty()
	s.mu.RUnlock()
	return
}

func (s *tsafeUints) Equal(s2 Uints) (ok bool) {
	s.mu.RLock()
	ok = s.values.Equal(s2)
	s.mu.RUnlock()
	return
}

func (s *tsafeUints) Find(matcher func(v uint) bool) (v uint, ok bool) {
	s.mu.RLock()
	v, ok = s.values.Find(matcher)
	s.mu.RUnlock()
	return
}

func (s *tsafeUints) FindAll(matcher func(v uint) bool) (v Uints) {
	s.mu.RLock()
	v = s.values.FindAll(matcher)
	s.mu.RUnlock()
	return
}

func (s *tsafeUints) First() (v uint, ok bool) {
	s.mu.RLock()
	v, ok = s.values.First()
	s.mu.RUnlock()
	return
}

func (s *tsafeUints) Get(i int) (v uint, ok bool) {
	s.mu.RLock()
	v, ok = s.values.Get(i)
	s.mu.RUnlock()
	return
}

func (s *tsafeUints) Intersect(s2 Uints) (v Uints) {
	s.mu.RLock()
	v = s.values.Intersect(s2)
	s.mu.RUnlock()
	return
}

func (s *tsafeUints) Last() (v uint, ok bool) {
	s.mu.RLock()
	v, ok = s.values.Last()
	s.mu.RUnlock()
	return
}

func (s *tsafeUints) Len() (v int) {
	s.mu.RLock()
	v = s.values.Len()
	s.mu.RUnlock()
	return
}

func (s *tsafeUints) Take(n int) (v Uints) {
	s.mu.RLock()
	v = s.values.Take(n)
	s.mu.RUnlock()
	return
}

func (s *tsafeUints) S() (out []interface{}) {
	s.mu.RLock()
	out = s.values.S()
	s.mu.RUnlock()
	return
}
func (s *tsafeUints) Uints() (v Uints) {
s.mu.RLock()
v = s.values.Copy()
s.mu.RUnlock()
return
} | syncuints.go | 0.709321 | 0.484807 | syncuints.go | starcoder |
package fonet
import (
"bytes"
"encoding/json"
"errors"
"io"
"log"
"math/rand"
"time"
)
// init seeds the global math/rand source so freshly created networks get
// different random weights and biases on every program run.
func init() {
	rand.Seed(time.Now().UnixNano())
}
var (
// ErrNotEnoughLayers is returned when trying to create a new network with too few layers.
ErrNotEnoughLayers = errors.New("too few layers, minimum of 3 required")
)
// Network is containing all the needed settings/variables.
type Network struct {
// w is the weights of the network.
w [][][]float64
// b is the biases of the network.
b [][]float64
// d is the delta values for each layer.
d [][]float64
// z is the z values for each layer.
z [][]float64
// l holds the number of layers in the network.
l int
// ls is the number of neurons in each layer.
ls []int
// activationID is the ID of the activation function used. This is stored for serialization purposes.
activationID ActivationFunction
// aFunc is the activation function
aFunc func(z float64) float64
// daFunc is the derivative of the activation function.
daFunc func(z float64) float64
}
type jsonNetwork struct {
W [][][]float64 `json:"W"`
B [][]float64 `json:"B"`
D [][]float64 `json:"D"`
Z [][]float64 `json:"Z"`
L int `json:"L"`
LS []int `json:"LS"`
ActivationID int `json:"ActivationID"`
}
// Copy returns a deep copy of the network. The copy shares no slice
// storage with the receiver, so mutating (e.g. training) one network
// never affects the other.
func (n Network) Copy() *Network {
	newNet := Network{
		w:            make([][][]float64, len(n.w)),
		b:            make([][]float64, len(n.b)),
		d:            make([][]float64, len(n.d)),
		z:            make([][]float64, len(n.z)),
		l:            n.l,
		ls:           make([]int, len(n.ls)),
		activationID: n.activationID,
		aFunc:        n.aFunc,
		daFunc:       n.daFunc,
	}
	// Deep copy all weights
	for i := range n.w {
		newNet.w[i] = make([][]float64, len(n.w[i]))
		for j := range n.w[i] {
			newNet.w[i][j] = make([]float64, len(n.w[i][j]))
			copy(newNet.w[i][j], n.w[i][j])
		}
	}
	// Deep copy all biases
	for i := range n.b {
		newNet.b[i] = make([]float64, len(n.b[i]))
		copy(newNet.b[i], n.b[i])
	}
	// Deep copy all deltas
	for i := range n.d {
		newNet.d[i] = make([]float64, len(n.d[i]))
		copy(newNet.d[i], n.d[i])
	}
	// Deep copy all z values. The previous implementation skipped this,
	// leaving newNet.z with nil inner slices; feedforward indexes into
	// z[l][j] and would therefore panic on the copy.
	for i := range n.z {
		newNet.z[i] = make([]float64, len(n.z[i]))
		copy(newNet.z[i], n.z[i])
	}
	// Copy the layer structure
	copy(newNet.ls, n.ls)
	return &newNet
}
// MarshalJSON implements the Marshaler interface for JSON encoding.
// The unexported network state is serialized through the exported
// jsonNetwork mirror struct.
func (n *Network) MarshalJSON() ([]byte, error) {
	// json.Marshal avoids the bytes.Buffer + json.Encoder indirection of
	// the previous implementation and does not append a trailing newline
	// (Encoder.Encode always adds one; encoding/json compacts it away
	// when this method is invoked via json.Marshal, but direct callers
	// previously saw it).
	return json.Marshal(jsonNetwork{
		W:            n.w,
		B:            n.b,
		D:            n.d,
		Z:            n.z,
		L:            n.l,
		LS:           n.ls,
		ActivationID: int(n.activationID),
	})
}
// UnmarshalJSON implements the Unmarshaler interface for JSON encoding.
// It restores the unexported network state from the jsonNetwork mirror
// struct and re-resolves the activation function pair from the stored ID.
//
// NOTE(review): ActivationID is taken from the payload unchecked; an
// out-of-range value would make the functionPairs lookups below panic.
// Confirm functionPairs covers every ID that can appear in persisted data.
func (n *Network) UnmarshalJSON(data []byte) error {
	var en jsonNetwork
	if err := json.Unmarshal(data, &en); err != nil {
		return err
	}
	n.w = en.W
	n.b = en.B
	n.d = en.D
	n.z = en.Z
	n.l = en.L
	n.ls = en.LS
	n.activationID = ActivationFunction(en.ActivationID)
	// Function values cannot be serialized; rebuild them from the ID.
	n.aFunc = functionPairs[n.activationID][0]
	n.daFunc = functionPairs[n.activationID][1]
	return nil
}
// NewNetwork is for creating a new network with the defined layers.
// ls lists the neuron count of every layer including the input layer at
// ls[0]; at least 3 layers (input, hidden, output) are required, otherwise
// ErrNotEnoughLayers is returned. All weights and biases are initialized
// uniformly at random in [0, 1).
func NewNetwork(ls []int, activationFunc ActivationFunction) (*Network, error) {
	if len(ls) < 3 {
		return nil, ErrNotEnoughLayers
	}
	// Internally the input layer is not counted: n.l is the number of
	// non-input layers and n.ls drops ls[0].
	n := Network{
		l:            len(ls) - 1,
		ls:           ls[1:],
		activationID: activationFunc,
		aFunc:        functionPairs[activationFunc][0],
		daFunc:       functionPairs[activationFunc][1],
	}
	// init weights
	// w[0] connects the input layer (ls[0] neurons) to the first hidden
	// layer (n.ls[0] neurons); w[l][i][j] is the weight from neuron i of
	// the previous layer to neuron j of layer l.
	n.w = make([][][]float64, n.l)
	n.w[0] = make([][]float64, ls[0])
	for i := 0; i < ls[0]; i++ {
		n.w[0][i] = make([]float64, n.ls[0])
		for j := 0; j < n.ls[0]; j++ {
			n.w[0][i][j] = rand.Float64()
		}
	}
	for l := 1; l < n.l; l++ {
		n.w[l] = make([][]float64, n.ls[l-1])
		for i := 0; i < n.ls[l-1]; i++ {
			n.w[l][i] = make([]float64, n.ls[l])
			for j := 0; j < n.ls[l]; j++ {
				n.w[l][i][j] = rand.Float64()
			}
		}
	}
	// init biases, deltas, z(s) — one value per neuron per non-input layer
	n.b = make([][]float64, n.l)
	n.d = make([][]float64, n.l)
	n.z = make([][]float64, n.l)
	for l := 0; l < n.l; l++ {
		n.b[l] = make([]float64, n.ls[l])
		for i := 0; i < n.ls[l]; i++ {
			n.b[l][i] = rand.Float64()
		}
		n.d[l] = make([]float64, n.ls[l])
		n.z[l] = make([]float64, n.ls[l])
	}
	return &n, nil
}
// dw returns the gradient-descent weight update for the connection from
// neuron i of layer l-1 to neuron j of layer l, at learning rate eta.
func (n *Network) dw(l, i, j int, eta float64) float64 {
	return -eta * n.d[l][j] * n.a(l-1, i)
}

// a returns the activation of neuron j in layer l, computed from the
// stored pre-activation value z (which feedforward must have filled in).
func (n *Network) a(l, j int) float64 {
	return n.aFunc(n.z[l][j])
}
// Train is for training the network with the specified dataset, epoch and
// learning rate. Each element of trainingData is a pair
// [input vector, expected output vector], and samples are applied one at a
// time (stochastic gradient descent). The last bool parameter is for
// tracking where the training is. It'll log each epoch.
func (n *Network) Train(trainingData [][][]float64, epochs int, lrate float64, debug bool) {
	for e := 0; e < epochs; e++ {
		for _, xy := range trainingData {
			n.backpropagate(xy, lrate)
		}
		if debug {
			log.Println("Epoch:", e+1, "/", epochs)
		}
	}
}
// backpropagate applies one stochastic-gradient-descent step for a single
// training sample xy = [input, expected output] at learning rate eta.
// It runs a forward pass (to populate n.z), computes deltas from the
// output layer backwards, then updates all weights and biases in place.
func (n *Network) backpropagate(xy [][]float64, eta float64) {
	x := xy[0]
	y := xy[1]
	// define z values (forward pass; only the side effect on n.z is needed)
	_ = n.feedforward(x)
	// define the output deltas: (activation - target) * f'(z)
	for j := 0; j < len(n.d[len(n.d)-1]); j++ {
		n.d[len(n.d)-1][j] = (n.a(len(n.d)-1, j) - y[j]) * n.daFunc(n.z[len(n.d)-1][j])
	}
	// define the inner deltas, walking backwards so that layer l+1 deltas
	// are already final when layer l is computed
	for l := len(n.d) - 2; l >= 0; l-- {
		for j := 0; j < len(n.d[l]); j++ {
			n.d[l][j] = n.delta(l, j)
		}
	}
	// update weights; layer 0 is special because its inputs come from x
	// rather than from a previous layer's activations
	for i := 0; i < len(n.w[0]); i++ {
		for j := 0; j < len(n.w[0][i]); j++ {
			n.w[0][i][j] += -eta * n.d[0][j] * x[i]
		}
	}
	for l := 1; l < len(n.w); l++ {
		for i := 0; i < len(n.w[l]); i++ {
			for j := 0; j < len(n.w[l][i]); j++ {
				n.w[l][i][j] += n.dw(l, i, j, eta)
			}
		}
	}
	// update biases
	for l := 0; l < len(n.b); l++ {
		for j := 0; j < len(n.b[l]); j++ {
			n.b[l][j] += -eta * n.d[l][j]
		}
	}
}
// delta computes the back-propagated error for neuron j of layer l as the
// weighted sum of layer l+1's deltas, scaled by the activation derivative.
// It should only be used during back-propagation, after the deltas of
// layer l+1 have been computed; otherwise it can return a wrong value.
func (n *Network) delta(l, j int) float64 {
	var sum float64
	for k := 0; k < n.ls[l+1]; k++ {
		sum += n.d[l+1][k] * n.w[l+1][j][k]
	}
	// n.daFunc(n.z[l][j]) does not depend on k, so it is hoisted out of
	// the loop instead of being recomputed and multiplied every iteration.
	return sum * n.daFunc(n.z[l][j])
}
// feedforward propagates the input vector a through every layer, storing
// each neuron's pre-activation sum in n.z (needed later by
// back-propagation) and returning the activations of the final layer.
func (n *Network) feedforward(a []float64) []float64 {
	for l := 0; l < n.l; l++ {
		atemp := make([]float64, n.ls[l])
		for j := 0; j < n.ls[l]; j++ {
			// z = sum_i(w[i][j] * a[i]) + bias
			n.z[l][j] = 0
			for i := 0; i < len(a); i++ {
				n.z[l][j] += n.w[l][i][j] * a[i]
			}
			n.z[l][j] += n.b[l][j]
			atemp[j] = n.aFunc(n.z[l][j])
		}
		// this layer's activations become the next layer's input
		a = atemp
	}
	return a
}
// fluctuateBiases scales every bias by (100+p)/100, where p is drawn
// uniformly from [0, percentageMargin/2).
// NOTE(review): the original comment claimed "+/- percentageMargin/2", but
// p is never negative, so the multiplier is always >= 1 and biases only
// ever grow in magnitude — confirm which behavior is intended.
func (n *Network) fluctuateBiases(percentageMargin float64) {
	for i := range n.b {
		for j := range n.b[i] {
			percentage := (rand.Float64() * percentageMargin) / 2
			n.b[i][j] *= (100 + percentage) / 100
		}
	}
}

// fluctuateDelta scales every delta by (100+p)/100, where p is drawn
// uniformly from [0, percentageMargin/2).
// NOTE(review): as above, p is never negative despite the original "+/-"
// description — confirm intent.
func (n *Network) fluctuateDelta(percentageMargin float64) {
	for i := range n.d {
		for j := range n.d[i] {
			percentage := (rand.Float64() * percentageMargin) / 2
			n.d[i][j] *= (100 + percentage) / 100
		}
	}
}

// fluctuateWeights scales every weight by (100+p)/100, where p is drawn
// uniformly from [0, percentageMargin/2).
// NOTE(review): as above, p is never negative despite the original "+/-"
// description — confirm intent.
func (n *Network) fluctuateWeights(percentageMargin float64) {
	for i := range n.w {
		for j := range n.w[i] {
			for k := range n.w[i][j] {
				percentage := (rand.Float64() * percentageMargin) / 2
				n.w[i][j][k] *= (100 + percentage) / 100
			}
		}
	}
}
// Predict calculates the output for the given input by running a full
// forward pass through the network.
func (n *Network) Predict(input []float64) []float64 {
	return n.feedforward(input)
}
// Export will serialize the network, and write it to the provided writer.
func (n *Network) Export(w io.Writer) error {
return json.NewEncoder(w).Encode(n)
}
// Load will load a network from the provided reader.
func Load(r io.Reader) (*Network, error) {
var n Network
if err := json.NewDecoder(r).Decode(&n); err != nil {
return nil, err
}
return &n, nil
}
// LoadFrom will load a network from the provided byte slice.
func LoadFrom(bs []byte) (*Network, error) {
buf := bytes.NewBuffer(bs)
return Load(buf)
} | network.go | 0.61115 | 0.458652 | network.go | starcoder |
package table
import (
"reflect"
"github.com/onsi/ginkgo"
)
/*
TableEntry represents an entry in a table test. You generally use the `Entry` constructor.
*/
type TableEntry struct {
	Description string        // becomes the description of the generated It
	Parameters  []interface{} // forwarded to the DescribeTable callback
	Pending     bool          // generate a pending PIt instead of an It
	Focused     bool          // generate a focused FIt instead of an It
}
// generateIt turns this entry into an individual Ginkgo spec by invoking
// itBody (the DescribeTable callback, as a reflect.Value) with the entry's
// saved parameters. Pending entries become a bodiless PIt; focused entries
// become an FIt.
func (t TableEntry) generateIt(itBody reflect.Value) {
	if t.Pending {
		ginkgo.PIt(t.Description)
		return
	}

	// Convert the saved parameters into reflect call arguments. A nil
	// parameter is replaced with the zero value of the corresponding
	// declared parameter type (reflect.ValueOf(nil) would be invalid).
	values := make([]reflect.Value, 0)
	itBodyType := itBody.Type()
	for i, param := range t.Parameters {
		var value reflect.Value

		if param == nil {
			inType := itBodyType.In(i)
			value = reflect.Zero(inType)
		} else {
			value = reflect.ValueOf(param)
		}

		values = append(values, value)
	}

	var (
		body interface{}

		timeout []float64
	)

	// If the callback's first parameter is a channel of interface (a
	// Ginkgo Done channel), treat this as an asynchronous spec: an
	// optional trailing float64 parameter is peeled off as the timeout,
	// and the Done channel supplied by Ginkgo is prepended to the args.
	if itBodyType.NumIn() >= 1 && itBodyType.In(0).Kind() == reflect.Chan &&
		itBodyType.In(0).Elem().Kind() == reflect.Interface {
		lenValues := len(values)

		if lenValues > 0 && values[lenValues-1].Kind() == reflect.Float64 {
			timeout = append(timeout, values[lenValues-1].Interface().(float64))
			values = values[:lenValues-1]
		}

		body = func(done chan<- interface{}) {
			values = append([]reflect.Value{reflect.ValueOf(done)}, values...)
			itBody.Call(values)
		}
	} else {
		body = func() {
			itBody.Call(values)
		}
	}

	if t.Focused {
		ginkgo.FIt(t.Description, body, timeout...)
	} else {
		ginkgo.It(t.Description, body, timeout...)
	}
}
/*
Entry constructs a TableEntry.
The first argument is a required description (this becomes the content of the generated Ginkgo `It`).
Subsequent parameters are saved off and sent to the callback passed in to `DescribeTable`.
Each Entry ends up generating an individual Ginkgo It.
*/
func Entry(description string, parameters ...interface{}) TableEntry {
return TableEntry{description, parameters, false, false}
}
/*
You can focus a particular entry with FEntry. This is equivalent to FIt.
*/
func FEntry(description string, parameters ...interface{}) TableEntry {
return TableEntry{description, parameters, false, true}
}
/*
You can mark a particular entry as pending with PEntry. This is equivalent to PIt.
*/
func PEntry(description string, parameters ...interface{}) TableEntry {
return TableEntry{description, parameters, true, false}
}
/*
You can mark a particular entry as pending with XEntry. This is equivalent to XIt.
*/
func XEntry(description string, parameters ...interface{}) TableEntry {
return TableEntry{description, parameters, true, false}
} | extensions/table/table_entry.go | 0.672547 | 0.433622 | table_entry.go | starcoder |
package main
import (
"bytes"
"encoding/json"
"math"
"strings"
"github.com/maruel/subcommands"
)
var cmdEditDistance = &subcommands.Command{
UsageLine: `edit-distance [options]`,
ShortDesc: `Computes the edit distance of two strings.`,
LongDesc: `Computes the edit distance of two strings using insertion, deletion,
substitution, and optionally also transposition. This expects the properties
of the quest to be a single JSON dictionary with two keys "a" and "b". Each
key should correspond to a string. The result of the job will be a dictionary
with a key "dist" that is the minimum number of edit operations needed to
transform a into b.`,
CommandRun: func() subcommands.CommandRun {
r := &editDistanceRun{}
r.registerOptions()
return r
},
}
// editDistanceRun carries the per-invocation state of the edit-distance
// command on top of the shared cmdRun plumbing.
type editDistanceRun struct {
	cmdRun
	// useTransposition enables transposition of two adjacent characters as
	// a single edit (Damerau–Levenshtein rather than plain Levenshtein).
	useTransposition bool
}

// registerOptions registers this command's flags in addition to the flags
// registered by the embedded cmdRun.
func (e *editDistanceRun) registerOptions() {
	e.cmdRun.registerOptions()
	e.Flags.BoolVar(&e.useTransposition, "use-transposition", false,
		"Include transposition as one of the possible candidates.")
}
// EditParams is the 'parameters' for the edit-distance algorithm.
type EditParams struct {
A string `json:"a"`
B string `json:"b"`
}
// EditResult is the normal result data for the edit-distance algorithm.
type EditResult struct {
Distance uint32 `json:"dist"`
// OpHistory is a string which says which edit commands were taken. The
// symbols are:
// = - no edit
// ~ - substitute
// - - delete
// + - insert
// X - transpose
// ! - error
OpHistory string `json:"op_history"`
Error string `json:"error,omitempty"`
}
// Run implements subcommands.CommandRun. It decodes the quest parameters
// into EditParams, computes the edit distance (possibly yielding to
// pending dependency quests), and reports the result. A decode failure is
// reported as an EditResult carrying the error message.
func (e *editDistanceRun) Run(a subcommands.Application, args []string, env subcommands.Env) int {
	e.start(a, e, env, cmdEditDistance)

	p := &EditParams{}
	err := json.NewDecoder(bytes.NewBufferString(e.questDesc.Parameters)).Decode(p)
	if err != nil {
		return e.finish(EditResult{Error: err.Error()})
	}

	rslt, stop := e.compute(p)
	// stop means we deferred to dependencies and must not finish yet.
	if stop {
		return 0
	}
	return e.finish(rslt)
}
// editSymbolLookup maps an edit-operation name to the single-character
// symbol recorded in EditResult.OpHistory.
var editSymbolLookup = map[string]string{
	"substitution":  "~",
	"insertion":     "+",
	"deletion":      "-",
	"equality":      "=",
	"transposition": "X",
}
// compute recursively determines the edit distance between p.A and p.B.
// Base cases (an empty string, or equal strings) are answered directly;
// otherwise one dependency sub-quest is spawned per candidate edit
// operation and the cheapest result is kept. stop is true when the
// computation must yield because dependency results are not yet available.
func (e *editDistanceRun) compute(p *EditParams) (rslt EditResult, stop bool) {
	// If one string is empty, the edit distance is the length of the other
	// string.
	if len(p.A) == 0 {
		rslt.Distance = uint32(len(p.B))
		rslt.OpHistory = strings.Repeat("+", len(p.B))
		return
	} else if len(p.B) == 0 {
		rslt.Distance = uint32(len(p.A))
		rslt.OpHistory = strings.Repeat("-", len(p.A))
		return
	}

	// If both strings are exactly equal, the distance between them is 0.
	if p.A == p.B {
		rslt.OpHistory = strings.Repeat("=", len(p.A))
		return
	}

	// Sub-problems, in the same order as opNames below.
	toDep := []interface{}{
		&EditParams{ // substitution || equality
			A: p.A[1:],
			B: p.B[1:],
		},
		&EditParams{ // deletion (remove a char from A)
			A: p.A[1:],
			B: p.B,
		},
		&EditParams{ // insertion (add the char to A)
			A: p.A,
			B: p.B[1:],
		},
	}
	// Transposition is only a candidate when enabled and the first two
	// characters are actually swapped between A and B.
	if e.useTransposition && len(p.A) > 1 && len(p.B) > 1 {
		if p.A[0] == p.B[1] && p.A[1] == p.B[0] {
			toDep = append(toDep, &EditParams{ // transposition
				A: p.A[2:],
				B: p.B[2:],
			})
		}
	}

	opNames := []string{
		"substitution", "deletion", "insertion", "transposition",
	}
	if p.A[0] == p.B[0] {
		opNames[0] = "equality"
	}

	retval := uint32(math.MaxUint32)
	opname := ""
	opchain := ""

	depsData, stop := e.depOn(toDep...)
	if stop {
		return
	}

	// Pick the cheapest candidate; depsData is index-aligned with opNames.
	for i, dep := range depsData {
		result := &EditResult{}
		err := json.NewDecoder(bytes.NewBufferString(dep)).Decode(result)
		if err != nil {
			rslt.Error = err.Error()
			return
		}
		opName := opNames[i]
		if result.Error != "" {
			// Propagate a dependency failure, marking it in the history.
			rslt.OpHistory = "!" + result.OpHistory
			rslt.Error = result.Error
			return
		}

		cost := result.Distance
		if opName != "equality" {
			// all operations (except equality) cost 1
			cost++
		}

		if cost < retval {
			retval = cost
			opname = opName
			opchain = result.OpHistory
		}
	}

	rslt.Distance = retval
	rslt.OpHistory = editSymbolLookup[opname] + opchain
	return
}
package golf
// LoadMap loads the map data into memory.
// NOTE(review): bytes are written downward from mapBase (index mapBase-i),
// so the map region apparently grows toward lower RAM addresses — confirm
// against the console's RAM layout.
func (e *Engine) LoadMap(mapData [0x4800]byte) {
	for i, b := range mapData {
		e.RAM[mapBase-i] = b
	}
}
// Map draws the map on the screen starting from tile
// mx, my with a size of mw and mh (in tiles). The map is drawn
// at screen coordinate dx, dy. Optional sprite options control per-tile
// scaling/flipping; tile 0 is treated as empty and skipped, and tiles
// outside the visible screen are culled.
func (e *Engine) Map(mx, my, mw, mh int, dx, dy float64, opts ...SOp) {
	opt := SOp{}
	if len(opts) > 0 {
		opt = opts[0]
	}
	// Zero-valued size/scale options default to 1 so a zero SOp is usable.
	if opt.W == 0 {
		opt.W = 1
	}
	if opt.H == 0 {
		opt.H = 1
	}
	if opt.SH == 0 {
		opt.SH = 1
	}
	if opt.SW == 0 {
		opt.SW = 1
	}
	// Camera offset is only used for culling; Fixed opts ignore the camera.
	cx := toInt(e.RAM[cameraX:cameraX+2], true)
	cy := toInt(e.RAM[cameraY:cameraY+2], true)
	if opt.Fixed {
		cx, cy = 0, 0
	}
	for x := 0; x < mw; x++ {
		// Screen x of this tile column, with the scale snapped to a whole
		// pixel fraction via roundPxl.
		sprX := int(float64(int(dx)+x*8*opt.W) * roundPxl(opt.SW, float64(8*opt.W)))
		if !tileInboundsX(sprX-cx, opt) {
			continue
		}
		for y := 0; y < mh; y++ {
			sprY := int(float64(int(dy)+y*8*opt.H) * roundPxl(opt.SH, float64(8*opt.H)))
			if !tileInboundsY(sprY-cy, opt) {
				continue
			}
			s := e.Mget(x+mx, y+my)
			// Tile 0 means "no tile"; nothing to draw.
			if s == 0 {
				continue
			}
			e.Spr(s, float64(sprX), float64(sprY), opt)
		}
	}
}
// roundPxl rounds to the nearest pixel rather than the nearest number:
// number's fractional part is snapped down to the largest multiple of
// 1/size that does not exceed it. size is the number of pixels per unit.
// NOTE(review): the scan's upper bound is hard-coded to 8.0 rather than
// 1.0, so it relies on the early return to terminate within the unit —
// confirm intended behavior for fractions close to 1.0 and for size > 8.
func roundPxl(number, size float64) float64 {
	inc := 1.0 / size
	num := int(number)
	frac := number - float64(num)
	for i := 0.0; i < 8.0; i += inc {
		if i > frac {
			// i has just passed frac, so the previous step is the answer.
			return float64(num) + (i - inc)
		}
	}
	return float64(num)
}
// tileInboundsX reports whether a tile at screen x coordinate x is
// visible, taking the sprite options' width and horizontal scale into
// account as a margin.
func tileInboundsX(x int, opt SOp) bool {
	width := int(float64(8*opt.W) * opt.SW)
	return tileInbounds(x, 96, width, 0)
}
// tileInboundsY reports whether a tile at screen y coordinate y is
// visible, taking the sprite options' height and vertical scale into
// account as a margin.
func tileInboundsY(y int, opt SOp) bool {
	height := int(float64(8*opt.H) * opt.SH)
	return tileInbounds(96, y, 0, height)
}
// tileInbounds reports whether the point (x, y) lies on the 192x192
// screen, extended by a margin of w horizontally and h vertically so
// that partially visible tiles are still drawn.
func tileInbounds(x, y, w, h int) bool {
	xVisible := x >= -w && x <= 192+w
	yVisible := y >= -h && y <= 192+h
	return xVisible && yVisible
}
// Mget gets the tile at the x, y coordinate on the map.
// Tiles are stored 8 to a group in 9 bytes: each tile has one byte for
// its low 8 bits, plus the group shares a 9th byte whose bit (7-shift)
// supplies bit 8 of tile number `shift`, allowing tile values up to 511.
func (e *Engine) Mget(x, y int) int {
	// The map is 128 tiles wide.
	dex := x + y*128
	shift := dex % 8
	i := ((dex / 8) * 9) + shift // offset of the tile's low byte
	j := ((dex / 8) * 9) + 8     // offset of the group's shared high-bit byte
	// Shifting by shift+1 moves bit (7-shift) of the shared byte to bit 8.
	return int(e.RAM[mapBase-j])<<(shift+1)&0b100000000 | int(e.RAM[mapBase-i])
}
// Mset sets the tile at the x, y coordinate on the map.
// The low 8 bits of t go into the tile's own byte; bit 8 of t is stored
// at bit (7-shift) of the group's shared 9th byte, which is cleared and
// then or-ed in. See Mget for the packing scheme.
func (e *Engine) Mset(x, y, t int) {
	dex := x + y*128
	shift := dex % 8
	i := ((dex / 8) * 9) + shift // offset of the tile's low byte
	j := ((dex / 8) * 9) + 8     // offset of the group's shared high-bit byte
	e.RAM[mapBase-i] = byte(t)
	// Clear this tile's bit in the shared byte, then set it from bit 8 of t.
	e.RAM[mapBase-j] &= (0b00000001 << (7 - shift)) ^ 0b11111111
	e.RAM[mapBase-j] |= byte(t>>1&0b10000000) >> shift
}
package observability
import (
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"go.opencensus.io/tag"
)
// Pool metrics:
// 1. Connections taken
// 2. Connections closed
// 3. Connections usetime -- how long is a connection used until it is closed, discarded or returned
// 4. Connections reused
// 4. Connections stale
// 5. Dial errors
const dimensionless = "1"
const unitMilliseconds = "ms"
var (
MBytesRead = stats.Int64("redis/bytes_read", "The number of bytes read from the server", stats.UnitBytes)
MBytesWritten = stats.Int64("redis/bytes_written", "The number of bytes written out to the server", stats.UnitBytes)
MDialErrors = stats.Int64("redis/dial_errors", "The number of dial errors", dimensionless)
MConnectionsTaken = stats.Int64("redis/connections_taken", "The number of connections taken", dimensionless)
MConnectionsClosed = stats.Int64("redis/connections_closed", "The number of connections closed", dimensionless)
MConnectionsReturned = stats.Int64("redis/connections_returned", "The number of connections returned to the pool", dimensionless)
MConnectionsReused = stats.Int64("redis/connections_reused", "The number of connections reused", dimensionless)
MConnectionsNew = stats.Int64("redis/connections_new", "The number of newly created connections", dimensionless)
MConnectionUseTimeMilliseconds = stats.Float64("redis/connection_usetime", "The number of milliseconds for which a connection is used", unitMilliseconds)
MRoundtripLatencyMilliseconds = stats.Float64("redis/roundtrip_latency", "The time between sending the first byte to the server until the last byte of response is received back", unitMilliseconds)
MWriteErrors = stats.Int64("redis/write_errors", "The number of errors encountered during write routines", dimensionless)
MWrites = stats.Int64("redis/writes", "The number of write invocations", dimensionless)
)
var KeyCommandName, _ = tag.NewKey("cmd")
// defaultMillisecondsDistribution buckets latency measurements.
// NOTE(review): the bracketed labels below only correspond to the boundary
// values if recorded measurements are in seconds (0.00001 s == 0.01 ms),
// yet the latency measures above are declared with unit "ms" — confirm the
// unit actually recorded before relying on these bucket boundaries.
var defaultMillisecondsDistribution = view.Distribution(
	// [0ms, 0.01ms, 0.05ms, 0.1ms, 0.5ms, 1ms, 1.5ms, 2ms, 2.5ms, 5ms, 10ms, 25ms, 50ms, 100ms, 200ms, 400ms, 600ms, 800ms, 1s, 1.5s, 2.5s, 5s, 10s, 20s, 40s, 100s, 200s, 500s]
	0, 0.00001, 0.00005, 0.0001, 0.0005, 0.001, 0.0015, 0.002, 0.0025, 0.005, 0.01, 0.025, 0.05, 0.1, 0.2, 0.4, 0.6, 0.8, 1, 1.5, 2.5, 5, 10, 20, 40, 100, 200, 500,
)

// defaultBytesDistribution buckets payload-size measurements.
var defaultBytesDistribution = view.Distribution(
	// [0, 1KB, 2KB, 4KB, 16KB, 64KB, 256KB, 1MB, 4MB, 16MB, 64MB, 256MB, 1GB, 4GB]
	0, 1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296,
)
var Views = []*view.View{
{
Name: "redis/client/connection_usetime",
Description: "The duration in milliseconds for which a connection is used before being returned to the pool, closed or discarded",
Aggregation: defaultMillisecondsDistribution,
Measure: MConnectionUseTimeMilliseconds,
},
{
Name: "redis/client/dial_errors",
Description: "The number of errors encountered after dialling",
Aggregation: view.Count(),
Measure: MDialErrors,
},
{
Name: "redis/client/bytes_written_cummulative",
Description: "The number of bytes written out to the server",
Aggregation: view.Count(),
Measure: MBytesWritten,
},
{
Name: "redis/client/bytes_written_distribution",
Description: "The number of distribution of bytes written out to the server",
Aggregation: defaultBytesDistribution,
Measure: MBytesWritten,
},
{
Name: "redis/client/bytes_read_cummulative",
Description: "The number of bytes read from a response from the server",
Aggregation: view.Count(),
Measure: MBytesRead,
},
{
Name: "redis/client/bytes_read_distribution",
Description: "The number of distribution of bytes read from the server",
Aggregation: defaultBytesDistribution,
Measure: MBytesRead,
},
{
Name: "redis/client/roundtrip_latency",
Description: "The distribution of milliseconds of the roundtrip latencies",
Aggregation: defaultMillisecondsDistribution,
Measure: MRoundtripLatencyMilliseconds,
TagKeys: []tag.Key{KeyCommandName},
},
{
Name: "redis/client/write_errors",
Description: "The number of errors encountered during a write routine",
Aggregation: view.Count(),
Measure: MWriteErrors,
TagKeys: []tag.Key{KeyCommandName},
},
{
Name: "redis/client/writes",
Description: "The number of write invocations",
Aggregation: view.Count(),
Measure: MWrites,
TagKeys: []tag.Key{KeyCommandName},
},
{
Name: "redis/client/connections_taken",
Description: "The number of connections taken out the pool",
Aggregation: view.Count(),
Measure: MConnectionsTaken,
},
{
Name: "redis/client/connections_returned",
Description: "The number of connections returned the connection pool",
Aggregation: view.Count(),
Measure: MConnectionsReturned,
},
{
Name: "redis/client/connections_reused",
Description: "The number of connections reused",
Aggregation: view.Count(),
Measure: MConnectionsReused,
},
{
Name: "redis/client/connections_new",
Description: "The number of newly created connections",
Aggregation: view.Count(),
Measure: MConnectionsNew,
},
} | internal/observability/observability.go | 0.524395 | 0.531878 | observability.go | starcoder |
package tdigest
import (
"math"
"sort"
)
var (
	nan              = math.NaN()
	positiveInfinity = math.Inf(1)
	negativeInfinity = math.Inf(-1)
	// sentinelCentroid stands in for an exhausted input list during the
	// merge in compress; its +Inf mean sorts it after every real centroid.
	sentinelCentroid = Centroid{Mean: positiveInfinity, Weight: 0.0}
)
// centroidsByMeanAsc implements sort.Interface, ordering centroids by
// ascending mean.
type centroidsByMeanAsc []Centroid

func (c centroidsByMeanAsc) Len() int           { return len(c) }
func (c centroidsByMeanAsc) Less(i, j int) bool { return c[i].Mean < c[j].Mean }
func (c centroidsByMeanAsc) Swap(i, j int)      { c[i], c[j] = c[j], c[i] }
// mergeCentroidFn merges centroid c into mergeResult given the running
// centroid index and accumulated weight, returning the updated index and
// result slice. Declared as a type so tests can substitute the function.
type mergeCentroidFn func(
	currIndex float64,
	currWeight float64,
	totalWeight float64,
	c Centroid,
	mergeResult []Centroid,
) (float64, []Centroid)
type tDigest struct {
compression float64 // compression factor
mergedCapacity int // merged centroid slice capacity
unmergedCapacity int // unmerged centroid slice capacity
multiplier int64 // quantile precision multiplier
mergeCentroidFn mergeCentroidFn // function to merge centroids
centroidsPool CentroidsPool // centroids pool
closed bool // whether the t-digest is closed
merged []Centroid // merged centroids
mergedWeight float64 // total weight of merged centroids
unmerged []Centroid // unmerged centroid slice capacity
unmergedWeight float64 // total weight of unmerged centroids
minValue float64 // minimum value
maxValue float64 // maximum value
}
// mergedCapacity computes the capacity required for the merged centroid
// slice at a given compression factor: ceil(pi*compression + 0.5).
func mergedCapacity(compression float64) int {
	limit := math.Pi*compression + 0.5
	return int(math.Ceil(limit))
}
// unmergedCapacity computes the capacity of the unmerged centroid buffer
// for a compression factor clamped to [20, 1000].
func unmergedCapacity(compression float64) int {
	// NB: the formula is taken from tdunning's implementation by
	// regressing against known sizes for sample compression values.
	if compression < 20 {
		compression = 20
	}
	if compression > 1000 {
		compression = 1000
	}
	return int(7.5 + 0.37*compression - 2e-4*compression*compression)
}
// NewTDigest creates a new t-digest configured from opts: the compression
// factor sizes the merged/unmerged centroid buffers, and a non-zero
// precision turns into a power-of-ten multiplier used by Quantile to
// truncate results.
// TODO(xichen): add pooling for t-digests
// TODO(xichen): add metrics
func NewTDigest(opts Options) TDigest {
	centroidsPool := opts.CentroidsPool()
	compression := opts.Compression()
	mergedCapacity := mergedCapacity(compression)
	unmergedCapacity := unmergedCapacity(compression)

	var multiplier int64
	if precision := opts.Precision(); precision != 0 {
		multiplier = int64(math.Pow10(precision))
	}

	d := &tDigest{
		compression: opts.Compression(),
		multiplier: multiplier,
		centroidsPool: centroidsPool,
		mergedCapacity: mergedCapacity,
		unmergedCapacity: unmergedCapacity,
	}
	d.mergeCentroidFn = d.mergeCentroid

	// Reset allocates the centroid buffers and initializes min/max.
	d.Reset()
	return d
}
// Merged returns the compressed (merged) centroids.
func (d *tDigest) Merged() []Centroid {
	return d.merged
}

// Unmerged returns the centroids buffered since the last compression.
func (d *tDigest) Unmerged() []Centroid {
	return d.unmerged
}

// Add records a single value with unit weight.
func (d *tDigest) Add(value float64) {
	d.add(value, 1.0)
}

// Min returns the minimum recorded value (the 0th quantile).
func (d *tDigest) Min() float64 {
	return d.Quantile(0.0)
}

// Max returns the maximum recorded value (the 1st quantile).
func (d *tDigest) Max() float64 {
	return d.Quantile(1.0)
}
// Quantile returns an estimate of the q-th quantile (q in [0, 1]).
// It returns NaN for q outside [0, 1], 0 for an empty digest, and the
// exact recorded min/max for q == 0 and q == 1. When a precision
// multiplier is configured, the result is truncated to that precision.
func (d *tDigest) Quantile(q float64) float64 {
	if q < 0.0 || q > 1.0 {
		return nan
	}

	// compress the centroids first.
	d.compress()

	// If the digest is empty, return 0.
	if len(d.merged) == 0 {
		return 0.0
	}
	if q == 0.0 {
		return d.minValue
	}
	if q == 1.0 {
		return d.maxValue
	}

	// Walk the merged centroids until the cumulative weight covers the
	// target, then interpolate linearly within that centroid's range.
	var (
		targetWeight = q * d.mergedWeight
		currWeight   = 0.0
		lowerBound   = d.minValue
		upperBound   float64
	)
	for i, c := range d.merged {
		upperBound = d.upperBound(i)
		if targetWeight <= currWeight+c.Weight {
			// The quantile falls within this centroid.
			ratio := (targetWeight - currWeight) / c.Weight
			quantile := lowerBound + ratio*(upperBound-lowerBound)
			// If there is a desired precision, we truncate the quantile per the precision requirement.
			if d.multiplier != 0 {
				quantile = math.Trunc(quantile*float64(d.multiplier)) / float64(d.multiplier)
			}
			return quantile
		}
		currWeight += c.Weight
		lowerBound = upperBound
	}

	// NB(xichen): should never get here unless the centroids array are empty
	// because the target weight should always be no larger than the total weight.
	return nan
}
// Merge folds every centroid of tdigest — merged and unmerged alike —
// into the receiver as a weighted sample.
func (d *tDigest) Merge(tdigest TDigest) {
	for _, group := range [][]Centroid{tdigest.Merged(), tdigest.Unmerged()} {
		for _, c := range group {
			d.add(c.Mean, c.Weight)
		}
	}
}
// Close returns the centroid buffers to the pool. It is idempotent; a
// closed digest must be Reset before reuse.
func (d *tDigest) Close() {
	if d.closed {
		return
	}
	d.closed = true
	d.centroidsPool.Put(d.merged)
	d.centroidsPool.Put(d.unmerged)
}

// Reset returns the digest to its empty state, re-acquiring centroid
// buffers from the pool and resetting min/max to +/-Inf.
func (d *tDigest) Reset() {
	d.closed = false
	d.merged = d.centroidsPool.Get(d.mergedCapacity)
	d.mergedWeight = 0.0
	d.unmerged = d.centroidsPool.Get(d.unmergedCapacity)
	d.unmergedWeight = 0.0
	d.minValue = positiveInfinity
	d.maxValue = negativeInfinity
}
// compress merges unmerged centroids and merged centroids.
// The unmerged buffer is sorted by mean, then the two sorted lists are
// merge-scanned and folded into a fresh pooled slice via mergeCentroidFn;
// sentinelCentroid (+Inf mean) marks an exhausted list. Afterwards the
// unmerged buffer is emptied and the merged weight updated.
func (d *tDigest) compress() {
	if len(d.unmerged) == 0 {
		return
	}

	sort.Sort(centroidsByMeanAsc(d.unmerged))
	var (
		totalWeight   = d.mergedWeight + d.unmergedWeight
		currWeight    = 0.0
		currIndex     = 0.0
		mergedIndex   = 0
		unmergedIndex = 0
		mergeResult   = d.centroidsPool.Get(len(d.merged) + len(d.unmerged))
	)

	for mergedIndex < len(d.merged) || unmergedIndex < len(d.unmerged) {
		currUnmerged := sentinelCentroid
		if unmergedIndex < len(d.unmerged) {
			currUnmerged = d.unmerged[unmergedIndex]
		}

		currMerged := sentinelCentroid
		if mergedIndex < len(d.merged) {
			currMerged = d.merged[mergedIndex]
		}

		// Consume whichever list currently has the smaller mean.
		if currUnmerged.Mean < currMerged.Mean {
			currIndex, mergeResult = d.mergeCentroidFn(currIndex, currWeight, totalWeight, currUnmerged, mergeResult)
			currWeight += currUnmerged.Weight
			unmergedIndex++
		} else {
			currIndex, mergeResult = d.mergeCentroidFn(currIndex, currWeight, totalWeight, currMerged, mergeResult)
			currWeight += currMerged.Weight
			mergedIndex++
		}
	}

	d.centroidsPool.Put(d.merged)
	d.merged = mergeResult
	d.mergedWeight = totalWeight
	d.unmerged = d.unmerged[:0]
	d.unmergedWeight = 0.0
}
// mergeCentroid merges a centroid into the list of merged centroids.
// If the centroid's projected index is close enough to the current one
// (within 1), it is absorbed into the last result centroid as a weighted
// mean update; otherwise it starts a new result centroid.
func (d *tDigest) mergeCentroid(
	currIndex float64,
	currWeight float64,
	totalWeight float64,
	c Centroid,
	mergeResult []Centroid,
) (float64, []Centroid) {
	nextIndex := d.nextIndex((currWeight + c.Weight) / totalWeight)
	if nextIndex-currIndex > 1 || len(mergeResult) == 0 {
		// This is the first centroid added, or the next index is too far away from the current index.
		mergeResult = d.appendCentroid(mergeResult, c)
		return d.nextIndex(currWeight / totalWeight), mergeResult
	}

	// The new centroid falls within the range of the current centroid.
	numResults := len(mergeResult)
	mergeResult[numResults-1].Weight += c.Weight
	mergeResult[numResults-1].Mean += (c.Mean - mergeResult[numResults-1].Mean) * c.Weight / mergeResult[numResults-1].Weight
	return currIndex, mergeResult
}
// nextIndex estimates the index of the next centroid using the t-digest
// arcsine scale function, which concentrates resolution at the tails.
func (d *tDigest) nextIndex(quantile float64) float64 {
	return d.compression * (math.Asin(2*quantile-1)/math.Pi + 0.5)
}

// add buffers a weighted value in the unmerged list, compressing first if
// the buffer is full, and keeps the running min/max up to date.
func (d *tDigest) add(value float64, weight float64) {
	if len(d.unmerged) == d.unmergedCapacity {
		d.compress()
	}
	d.minValue = math.Min(d.minValue, value)
	d.maxValue = math.Max(d.maxValue, value)
	d.unmerged = d.appendCentroid(d.unmerged, Centroid{Mean: value, Weight: weight})
	d.unmergedWeight += weight
}
// upperBound returns the upper bound for computing quantiles given the centroid index:
// the midpoint between this centroid's mean and the next one's, or the
// recorded maximum for the last centroid.
// d.merged is guaranteed to have at least one centroid when upperBound is called.
func (d *tDigest) upperBound(index int) float64 {
	if index == len(d.merged)-1 {
		return d.maxValue
	}

	return (d.merged[index].Mean + d.merged[index+1].Mean) / 2.0
}

// appendCentroid appends c to centroids, growing through the pool (at
// double capacity) instead of the runtime allocator when full.
func (d *tDigest) appendCentroid(centroids []Centroid, c Centroid) []Centroid {
	if len(centroids) == cap(centroids) {
		newCentroids := d.centroidsPool.Get(2 * len(centroids))
		newCentroids = append(newCentroids, centroids...)
		d.centroidsPool.Put(centroids)
		centroids = newCentroids
	}
	return append(centroids, c)
}
package gt
import (
"database/sql/driver"
"encoding/json"
"strconv"
)
/*
Shortcut: parses successfully or panics. Should be used only in root scope. When
error handling is relevant, use `.Parse`.
*/
func ParseNullInt(src string) (val NullInt) {
try(val.Parse(src))
return
}
/*
Variant of `int64` where zero value is considered empty in text, and null in
JSON and SQL. Use this for fields where 0 is not allowed, such as primary and
foreign keys, or unique bigserials.
Unlike `int64`, encoding/decoding is not always reversible:
JSON 0 → Go 0 → JSON null
SQL 0 → Go 0 → SQL null
Differences from `"database/sql".NullInt64`:
* Much easier to use.
* Supports text.
* Supports JSON.
* Fewer states: null and zero are one.
In your data model, numeric fields should be either:
* Non-nullable; zero value = 0; use `int64`.
* Nullable; zero value = `null`; 0 is not allowed; use `gt.NullInt`.
Avoid `*intN` or `sql.NullIntN`.
*/
type NullInt int64
var (
_ = Encodable(NullInt(0))
_ = Decodable((*NullInt)(nil))
)
// Implement `gt.Zeroable`. Equivalent to `reflect.ValueOf(self).IsZero()`.
func (self NullInt) IsZero() bool { return self == 0 }
// Implement `gt.Nullable`. True if zero.
func (self NullInt) IsNull() bool { return self.IsZero() }
// Implement `gt.PtrGetter`, returning `*int64`.
func (self *NullInt) GetPtr() interface{} { return (*int64)(self) }
// Implement `gt.Getter`. If zero, returns `nil`, otherwise returns `int64`.
func (self NullInt) Get() interface{} {
if self.IsNull() {
return nil
}
return int64(self)
}
// Implement `gt.Setter`, using `.Scan`. Panics on error.
func (self *NullInt) Set(src interface{}) { try(self.Scan(src)) }
// Implement `gt.Zeroer`, zeroing the receiver.
func (self *NullInt) Zero() {
if self != nil {
*self = 0
}
}
/*
Implement `fmt.Stringer`. If zero, returns an empty string. Otherwise formats
using `strconv.FormatInt`.
*/
func (self NullInt) String() string {
if self.IsNull() {
return ``
}
return strconv.FormatInt(int64(self), 10)
}
/*
Implement `gt.Parser`. An empty input zeroes the receiver; any other input
is parsed as a base-10 signed 64-bit integer via `strconv.ParseInt`.
*/
func (self *NullInt) Parse(src string) error {
	if src == `` {
		self.Zero()
		return nil
	}

	num, err := strconv.ParseInt(src, 10, 64)
	if err != nil {
		return err
	}

	*self = NullInt(num)
	return nil
}
// Append implements `gt.Appender`, appending the same representation as
// `.String` to buf. The null (zero) state appends nothing.
func (self NullInt) Append(buf []byte) []byte {
	if !self.IsNull() {
		buf = strconv.AppendInt(buf, int64(self), 10)
	}
	return buf
}
/*
Implement `encoding.TextMarshaler`. If zero, returns nil. Otherwise returns the
same representation as `.String`.
*/
func (self NullInt) MarshalText() ([]byte, error) {
	if self.IsNull() {
		return nil, nil
	}
	return self.Append(nil), nil
}
// Implement `encoding.TextUnmarshaler`, using the same algorithm as `.Parse`.
func (self *NullInt) UnmarshalText(src []byte) error {
	// NOTE(review): `bytesString` presumably converts []byte to string without
	// copying — confirm against its definition elsewhere in this package.
	return self.Parse(bytesString(src))
}
/*
Implement `json.Marshaler`. If zero, returns bytes representing `null`.
Otherwise uses the default `json.Marshal` behavior for `int64`.
*/
func (self NullInt) MarshalJSON() ([]byte, error) {
	if self.IsNull() {
		// `bytesNull` is this package's shared literal for JSON `null`.
		return bytesNull, nil
	}
	return json.Marshal(self.Get())
}
/*
Implement `json.Unmarshaler`. If the input is empty or represents JSON `null`,
zeroes the receiver. Otherwise uses the default `json.Unmarshal` behavior
for `*int64`.
*/
func (self *NullInt) UnmarshalJSON(src []byte) error {
	if isJsonEmpty(src) {
		self.Zero()
		return nil
	}
	// GetPtr aliases the receiver as *int64, so json.Unmarshal writes in place.
	return json.Unmarshal(src, self.GetPtr())
}
// Implement `driver.Valuer`, using `.Get`: SQL null when zero, int64 otherwise.
func (self NullInt) Value() (driver.Value, error) {
	return self.Get(), nil
}
/*
Implement `sql.Scanner`, converting an arbitrary input to `gt.NullInt` and
modifying the receiver. Acceptable inputs:

	* `nil`       -> use `.Zero`
	* `string`    -> use `.Parse`
	* `[]byte`    -> use `.UnmarshalText`
	* `intN`      -> convert and assign
	* `*intN`     -> use `.Zero` or convert and assign
	* `NullInt`   -> assign
	* `gt.Getter` -> scan underlying value
*/
func (self *NullInt) Scan(src interface{}) error {
	switch src := src.(type) {
	case nil:
		self.Zero()
		return nil
	case string:
		return self.Parse(src)
	case []byte:
		return self.UnmarshalText(src)
	// Each fixed-width integer type (and its pointer form) must be enumerated
	// explicitly: a type switch does not convert between distinct types.
	case int:
		*self = NullInt(src)
		return nil
	case *int:
		if src == nil {
			self.Zero()
		} else {
			*self = NullInt(*src)
		}
		return nil
	case int8:
		*self = NullInt(src)
		return nil
	case *int8:
		if src == nil {
			self.Zero()
		} else {
			*self = NullInt(*src)
		}
		return nil
	case int16:
		*self = NullInt(src)
		return nil
	case *int16:
		if src == nil {
			self.Zero()
		} else {
			*self = NullInt(*src)
		}
		return nil
	case int32:
		*self = NullInt(src)
		return nil
	case *int32:
		if src == nil {
			self.Zero()
		} else {
			*self = NullInt(*src)
		}
		return nil
	case int64:
		*self = NullInt(src)
		return nil
	case *int64:
		if src == nil {
			self.Zero()
		} else {
			*self = NullInt(*src)
		}
		return nil
	case NullInt:
		*self = src
		return nil
	default:
		// Last resort: unwrap a `gt.Getter` and retry with the inner value;
		// otherwise report the unsupported source type.
		val, ok := get(src)
		if ok {
			return self.Scan(val)
		}
		return errScanType(self, src)
	}
}
/*
Free cast to the underlying `int64`. Sometimes handy when this type is embedded
in a struct.
*/
func (self NullInt) Int64() int64 { return int64(self) } | gt_null_int.go | 0.735167 | 0.636042 | gt_null_int.go | starcoder |
package day_08
// input is the raw puzzle data for this day: one screen instruction per line
// ("rect AxB", "rotate row y=R by N", "rotate column x=C by N"), with a
// leading and trailing newline from the raw-string delimiters.
// Do not edit: this is verbatim puzzle input consumed by the solver.
const input = `
rect 1x1
rotate row y=0 by 10
rect 1x1
rotate row y=0 by 10
rect 1x1
rotate row y=0 by 5
rect 1x1
rotate row y=0 by 3
rect 2x1
rotate row y=0 by 4
rect 1x1
rotate row y=0 by 3
rect 1x1
rotate row y=0 by 2
rect 1x1
rotate row y=0 by 3
rect 2x1
rotate row y=0 by 2
rect 1x1
rotate row y=0 by 3
rect 2x1
rotate row y=0 by 5
rotate column x=0 by 1
rect 4x1
rotate row y=1 by 12
rotate row y=0 by 10
rotate column x=0 by 1
rect 9x1
rotate column x=7 by 1
rotate row y=1 by 3
rotate row y=0 by 2
rect 1x2
rotate row y=1 by 3
rotate row y=0 by 1
rect 1x3
rotate column x=35 by 1
rotate column x=5 by 2
rotate row y=2 by 5
rotate row y=1 by 5
rotate row y=0 by 2
rect 1x3
rotate row y=2 by 8
rotate row y=1 by 10
rotate row y=0 by 5
rotate column x=5 by 1
rotate column x=0 by 1
rect 6x1
rotate row y=2 by 7
rotate row y=0 by 5
rotate column x=0 by 1
rect 4x1
rotate column x=40 by 2
rotate row y=2 by 10
rotate row y=0 by 12
rotate column x=5 by 1
rotate column x=0 by 1
rect 9x1
rotate column x=43 by 1
rotate column x=40 by 2
rotate column x=38 by 1
rotate column x=15 by 1
rotate row y=3 by 35
rotate row y=2 by 35
rotate row y=1 by 32
rotate row y=0 by 40
rotate column x=32 by 1
rotate column x=29 by 1
rotate column x=27 by 1
rotate column x=25 by 1
rotate column x=23 by 2
rotate column x=22 by 1
rotate column x=21 by 3
rotate column x=20 by 1
rotate column x=18 by 3
rotate column x=17 by 1
rotate column x=15 by 1
rotate column x=14 by 1
rotate column x=12 by 1
rotate column x=11 by 3
rotate column x=10 by 1
rotate column x=9 by 1
rotate column x=8 by 2
rotate column x=7 by 1
rotate column x=4 by 1
rotate column x=3 by 1
rotate column x=2 by 1
rotate column x=0 by 1
rect 34x1
rotate column x=44 by 1
rotate column x=24 by 1
rotate column x=19 by 1
rotate row y=1 by 8
rotate row y=0 by 10
rotate column x=8 by 1
rotate column x=7 by 1
rotate column x=6 by 1
rotate column x=5 by 2
rotate column x=3 by 1
rotate column x=2 by 1
rotate column x=1 by 1
rotate column x=0 by 1
rect 9x1
rotate row y=0 by 40
rotate column x=43 by 1
rotate row y=4 by 10
rotate row y=3 by 10
rotate row y=2 by 5
rotate row y=1 by 10
rotate row y=0 by 15
rotate column x=7 by 2
rotate column x=6 by 3
rotate column x=5 by 2
rotate column x=3 by 2
rotate column x=2 by 4
rotate column x=0 by 2
rect 9x2
rotate row y=3 by 47
rotate row y=0 by 10
rotate column x=42 by 3
rotate column x=39 by 4
rotate column x=34 by 3
rotate column x=32 by 3
rotate column x=29 by 3
rotate column x=22 by 3
rotate column x=19 by 3
rotate column x=14 by 4
rotate column x=4 by 3
rotate row y=4 by 3
rotate row y=3 by 8
rotate row y=1 by 5
rotate column x=2 by 3
rotate column x=1 by 3
rotate column x=0 by 2
rect 3x2
rotate row y=4 by 8
rotate column x=45 by 1
rotate column x=40 by 5
rotate column x=26 by 3
rotate column x=25 by 5
rotate column x=15 by 5
rotate column x=10 by 5
rotate column x=7 by 5
rotate row y=5 by 35
rotate row y=4 by 42
rotate row y=2 by 5
rotate row y=1 by 20
rotate row y=0 by 45
rotate column x=48 by 5
rotate column x=47 by 5
rotate column x=46 by 5
rotate column x=43 by 5
rotate column x=41 by 5
rotate column x=38 by 5
rotate column x=37 by 5
rotate column x=36 by 5
rotate column x=33 by 1
rotate column x=32 by 5
rotate column x=31 by 5
rotate column x=30 by 1
rotate column x=28 by 5
rotate column x=27 by 5
rotate column x=26 by 5
rotate column x=23 by 1
rotate column x=22 by 5
rotate column x=21 by 5
rotate column x=20 by 1
rotate column x=17 by 5
rotate column x=16 by 5
rotate column x=13 by 1
rotate column x=12 by 3
rotate column x=7 by 5
rotate column x=6 by 5
rotate column x=3 by 1
rotate column x=2 by 3
`
package container
import (
"github.com/nwillc/genfuncs"
)
// Heap implements Queue.
var _ Queue[int] = (*Heap[int])(nil)

// Heap implements an ordered heap of any type which can be min heap or max heap depending on the compare provided.
// Heap implements Queue.
type Heap[T any] struct {
	slice   GSlice[T]                       // element storage; heap-ordered except as noted by `ordered`
	compare genfuncs.BiFunction[T, T, bool] // true when its first argument should surface before the second
	ordered bool                            // true after Peek has parked the current top element at the end of slice
}
// NewHeap builds a Heap ordered by compare and pushes any provided values
// onto it.
func NewHeap[T any](compare genfuncs.BiFunction[T, T, bool], values ...T) *Heap[T] {
	h := &Heap[T]{
		compare: compare,
		slice:   make(GSlice[T], 0, len(values)),
	}
	h.AddAll(values...)
	return h
}
// Len returns the number of elements currently stored in the heap.
func (h *Heap[T]) Len() (length int) { length = h.slice.Len(); return length }
// Add pushes a single value onto the heap.
//
// If a preceding Peek parked the current top element at the end of the slice
// (signaled by `ordered`), that element is first sifted back into the heap;
// otherwise appending would leave the heap invariant permanently broken and a
// later Peek/Remove could return the wrong element.
func (h *Heap[T]) Add(v T) {
	if h.ordered {
		// Re-insert the parked top: it sits at the last index, so a standard
		// sift-up restores the invariant over the whole slice.
		h.up(h.Len() - 1)
		h.ordered = false
	}
	h.slice = append(h.slice, v)
	h.up(h.Len() - 1)
}
// AddAll pushes all the given values onto the heap.
//
// Like Add, it first re-inserts the element a preceding Peek parked at the end
// of the slice, and it clears the `ordered` flag so the next Peek re-orders
// (the previous implementation left `ordered` set, making Peek return a stale
// element after AddAll).
func (h *Heap[T]) AddAll(values ...T) {
	if h.ordered {
		// Sift the parked top back into place before growing the slice.
		h.up(h.Len() - 1)
		h.ordered = false
	}
	end := h.Len()
	h.slice = append(h.slice, values...)
	for ; end < h.Len(); end++ {
		h.up(end)
	}
}
// Peek returns the next element without removing it. Panics with
// genfuncs.NoSuchElement if the heap is empty.
//
// Implementation note: the first Peek after a mutation swaps the root to the
// *end* of the slice and sifts the heap down over the remaining elements,
// recording this via `ordered`; Remove can then simply truncate the slice.
func (h *Heap[T]) Peek() (value T) {
	if h.Len() <= 0 {
		panic(genfuncs.NoSuchElement)
	}
	n := h.Len() - 1
	if n > 0 && !h.ordered {
		h.slice.Swap(0, n)
		h.down()
		h.ordered = true
	}
	value = h.slice[n]
	return value
}
// Remove pops the next item off the heap and returns it. Panics (via Peek)
// if the heap is empty.
func (h *Heap[T]) Remove() (value T) {
	value = h.Peek()
	// Peek parked the returned element at the end of the slice, so removing it
	// is a simple truncation; the remaining prefix is already a valid heap.
	h.slice = h.slice[0 : h.Len()-1]
	h.ordered = false
	return value
}
// Values returns a slice of the values in the Heap in no particular order.
// The returned slice aliases the heap's internal storage; mutating it will
// corrupt the heap.
func (h *Heap[T]) Values() (values GSlice[T]) {
	values = h.slice
	return values
}
// up sifts the element at index jj toward the root, swapping with its parent
// while compare prefers the child, restoring the heap invariant after an
// element is placed at jj.
func (h *Heap[T]) up(jj int) {
	for {
		i := parent(jj)
		if i == jj || !h.compare(h.slice[jj], h.slice[i]) {
			break
		}
		h.slice.Swap(i, jj)
		jj = i
	}
}
// down sifts the root element toward the leaves, swapping with the preferred
// child until the invariant holds. It only considers indices below Len()-1:
// the final slot is excluded because Peek parks the outgoing top there.
func (h *Heap[T]) down() {
	n := h.Len() - 1
	i1 := 0
	for {
		j1 := left(i1)
		if j1 >= n || j1 < 0 {
			break
		}
		j := j1
		j2 := right(i1)
		// Pick whichever child compare prefers.
		if j2 < n && h.compare(h.slice[j2], h.slice[j1]) {
			j = j2
		}
		if !h.compare(h.slice[j], h.slice[i1]) {
			break
		}
		h.slice.Swap(i1, j)
		i1 = j
	}
}
// parent returns the index of the parent of heap node i.
func parent(i int) (p int) { p = (i - 1) / 2; return p }

// left returns the index of the left child of heap node i.
func left(i int) (l int) { l = (i * 2) + 1; return l }

// right returns the index of the right child of heap node i.
func right(i int) (r int) { r = left(i) + 1; return r }
package chargingstrategy
import (
"math"
"github.com/Telenav/osrm-backend/integration/util"
)
// simpleChargeStrategy models charging with three fixed target levels
// (60%, 80% and 100% of the maximum energy) and precomputed stage costs.
type simpleChargeStrategy struct {
	maxEnergyLevel                float64 // full battery level, rounded to 2 decimal places
	sixtyPercentOFMaxEnergy       float64 // 60% of max, rounded to 2 decimal places
	eightyPercentOfMaxEnergy      float64 // 80% of max, rounded to 2 decimal places
	costFrom60PercentTo80Percent  float64 // seconds to charge from 60% to 80%
	costFrom60PercentTo100Percent float64 // seconds to charge from 60% to 100%
	costFrom80PercentTo100Percent float64 // seconds to charge from 80% to 100%
	stateCandidates               []State // candidate target charge states (60%, 80%, 100%)
}
// NewSimpleChargingStrategy creates a fake charge strategy whose candidate
// target levels are 60%, 80% and 100% of maxEnergyLevel, each rounded to two
// decimal places.
func NewSimpleChargingStrategy(maxEnergyLevel float64) *simpleChargeStrategy {
	round2 := func(x float64) float64 { return math.Round(x*100) / 100 }

	level60 := round2(maxEnergyLevel * 0.6)
	level80 := round2(maxEnergyLevel * 0.8)
	level100 := round2(maxEnergyLevel)

	return &simpleChargeStrategy{
		maxEnergyLevel:                level100,
		sixtyPercentOFMaxEnergy:       level60,
		eightyPercentOfMaxEnergy:      level80,
		costFrom60PercentTo80Percent:  3600.0,
		costFrom60PercentTo100Percent: 10800.0,
		costFrom80PercentTo100Percent: 7200.0,
		stateCandidates: []State{
			{Energy: level60},
			{Energy: level80},
			{Energy: level100},
		},
	}
}
// @todo:
// - Influence of returning candidate with no charge time and additional energy
// CreateChargingStates returns the fixed candidate target charge states
// (60%, 80% and 100% of maximum energy). The returned slice aliases internal
// state; callers must not modify it.
func (f *simpleChargeStrategy) CreateChargingStates() []State {
	return f.stateCandidates
}
// zeroChargeCost is the shared "no charging needed" result.
var zeroChargeCost = ChargingCost{
	Duration: 0.0,
}
// Fake charge strategy
// From empty energy:
// charge rule #1: 1 hour charge to 60% of max energy
// charge rule #2: 2 hour charge to 80%, means from 60% ~ 80% need 1 hour
// charge rule #3: 4 hour charge to 100%, means from 80% ~ 100% need 2 hours
func (f *simpleChargeStrategy) EvaluateCost(arrivalEnergy float64, targetState State) ChargingCost {
sixtyPercentOfMaxEnergy := f.sixtyPercentOFMaxEnergy
eightyPercentOfMaxEnergy := f.eightyPercentOfMaxEnergy
if arrivalEnergy > targetState.Energy ||
util.Float64Equal(targetState.Energy, 0.0) {
return zeroChargeCost
}
totalTime := 0.0
if arrivalEnergy < sixtyPercentOfMaxEnergy {
energyNeeded4Stage1 := sixtyPercentOfMaxEnergy - arrivalEnergy
totalTime += energyNeeded4Stage1 / sixtyPercentOfMaxEnergy * 3600.0
if util.Float64Equal(targetState.Energy, sixtyPercentOfMaxEnergy) {
return ChargingCost{
Duration: totalTime,
}
} else if util.Float64Equal(targetState.Energy, eightyPercentOfMaxEnergy) {
return ChargingCost{
Duration: totalTime + f.costFrom60PercentTo80Percent,
}
}
return ChargingCost{
Duration: totalTime + f.costFrom60PercentTo100Percent,
}
}
if arrivalEnergy < eightyPercentOfMaxEnergy {
energyNeeded4Stage2 := eightyPercentOfMaxEnergy - arrivalEnergy
totalTime += energyNeeded4Stage2 / (eightyPercentOfMaxEnergy - sixtyPercentOfMaxEnergy) * 3600.0
if util.Float64Equal(targetState.Energy, eightyPercentOfMaxEnergy) {
return ChargingCost{
Duration: totalTime,
}
}
return ChargingCost{
Duration: totalTime + f.costFrom80PercentTo100Percent,
}
}
if arrivalEnergy < f.maxEnergyLevel {
energyNeeded4Stage3 := f.maxEnergyLevel - arrivalEnergy
totalTime += energyNeeded4Stage3 / (f.maxEnergyLevel - eightyPercentOfMaxEnergy) * 7200.0
if util.Float64Equal(targetState.Energy, f.maxEnergyLevel) {
return ChargingCost{
Duration: totalTime,
}
}
}
return zeroChargeCost
} | integration/service/oasis/graph/chargingstrategy/simple_charge_strategy.go | 0.722037 | 0.565119 | simple_charge_strategy.go | starcoder |
package key
import (
"bytes"
"encoding/base64"
"encoding/binary"
"errors"
"math"
"time"
"github.com/genjidb/genji/document"
)
// base64encoder is a custom base64 alphabet whose characters are listed in
// ascending ASCII order, so encoded output preserves the byte-wise sort order
// of the input.
const base64encoder = "-0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz"

// base64Encoding is the unpadded encoding built on the sort-friendly alphabet.
var base64Encoding = base64.NewEncoding(base64encoder).WithPadding(base64.NoPadding)

// Delimiter and terminator bytes used by the array and document encodings.
const arrayValueDelim = 0x1f
const arrayEnd = 0x1e
const documentValueDelim = 0x1c
const documentEnd = 0x1d
// AppendBool appends the one-byte binary representation of x to buf:
// 1 for true, 0 for false.
func AppendBool(buf []byte, x bool) []byte {
	var b byte
	if x {
		b = 1
	}
	return append(buf, b)
}
// DecodeBool takes a byte slice and decodes it into a boolean.
// Only the first byte is examined; any value other than 1 decodes as false.
func DecodeBool(buf []byte) bool {
	return buf[0] == 1
}
// AppendUint64 appends the 8-byte big-endian representation of x to buf.
func AppendUint64(buf []byte, x uint64) []byte {
	// Grow by eight zero bytes, then write the value into the new tail.
	out := append(buf, 0, 0, 0, 0, 0, 0, 0, 0)
	binary.BigEndian.PutUint64(out[len(out)-8:], x)
	return out
}
// DecodeUint64 decodes the first 8 bytes of buf as a big-endian uint64,
// returning an error when buf is too short.
func DecodeUint64(buf []byte) (uint64, error) {
	if len(buf) >= 8 {
		return binary.BigEndian.Uint64(buf), nil
	}
	return 0, errors.New("cannot decode buffer to uint64")
}
// AppendInt64 appends the 8-byte big-endian representation of x to buf,
// biased by 2^63 so the unsigned bytes sort in the same order as the signed
// values (MinInt64 encodes as all zeroes, MaxInt64 as all 0xFF).
func AppendInt64(buf []byte, x int64) []byte {
	biased := uint64(x) + math.MaxInt64 + 1
	out := append(buf, 0, 0, 0, 0, 0, 0, 0, 0)
	binary.BigEndian.PutUint64(out[len(out)-8:], biased)
	return out
}
// DecodeInt64 decodes the first 8 bytes of buf into an int64, undoing the
// 2^63 bias applied by AppendInt64. On error it returns 0 rather than a
// partially-computed value (the previous version returned garbage alongside
// the error).
func DecodeInt64(buf []byte) (int64, error) {
	x, err := DecodeUint64(buf)
	if err != nil {
		return 0, err
	}
	// Subtract the bias in uint64 space; wraparound gives the signed value.
	x -= math.MaxInt64 + 1
	return int64(x), nil
}
// AppendFloat64 appends an order-preserving 8-byte encoding of x to buf:
// non-negative values get their sign bit flipped, negative values get all
// bits flipped, so the big-endian bytes sort in natural numeric order.
func AppendFloat64(buf []byte, x float64) []byte {
	bits := math.Float64bits(x)
	if x >= 0 {
		bits ^= 1 << 63
	} else {
		bits = ^bits
	}
	return AppendUint64(buf, bits)
}
// DecodeFloat64 decodes the first 8 bytes of buf into a float64, reversing
// the order-preserving transform applied by AppendFloat64. Unlike the
// previous version (which panicked), a buffer shorter than 8 bytes returns
// an error, consistent with DecodeUint64.
func DecodeFloat64(buf []byte) (float64, error) {
	if len(buf) < 8 {
		return 0, errors.New("cannot decode buffer to float64")
	}
	x := binary.BigEndian.Uint64(buf)
	// A set top bit means the original value was non-negative (sign flipped);
	// otherwise all bits were flipped.
	if (x & (1 << 63)) != 0 {
		x ^= 1 << 63
	} else {
		x ^= 1<<64 - 1
	}
	return math.Float64frombits(x), nil
}
// AppendBase64 encodes data into a custom base64 encoding. The resulting slice respects
// natural sort-ordering.
func AppendBase64(buf []byte, data []byte) ([]byte, error) {
b := bytes.NewBuffer(buf)
enc := base64.NewEncoder(base64Encoding, b)
_, err := enc.Write(data)
if err != nil {
return nil, err
}
err = enc.Close()
if err != nil {
return nil, err
}
return b.Bytes(), nil
}
// DecodeBase64 decodes a custom base64 encoded byte slice,
// encoded with AppendBase64. Returns an error on malformed input.
func DecodeBase64(data []byte) ([]byte, error) {
	var buf bytes.Buffer

	dec := base64.NewDecoder(base64Encoding, bytes.NewReader(data))
	_, err := buf.ReadFrom(dec)
	if err != nil {
		return nil, err
	}

	return buf.Bytes(), nil
}
// AppendNumber takes a number value, integer or double, and encodes it in 16 bytes
// so that encoded integers and doubles are naturally ordered.
// Integers will first be encoded using AppendInt64 on 8 bytes, then 8 zero-bytes will be
// appended to them.
// Doubles will first be converted to integer, encoded using AppendInt64,
// then AppendFloat64 will be called with the float value.
func AppendNumber(buf []byte, v document.Value) ([]byte, error) {
	if !v.Type.IsNumber() {
		return nil, errors.New("expected number type")
	}

	if v.Type == document.IntegerValue {
		// appending 8 zero bytes so that the integer has the same size as the double
		// but always lower for the same value.
		return append(AppendInt64(buf, v.V.(int64)), 0, 0, 0, 0, 0, 0, 0, 0), nil
	}

	x := v.V.(float64)
	if x > math.MaxInt64 {
		// Clamp the integer prefix so huge doubles still sort after every integer.
		return AppendFloat64(AppendInt64(buf, math.MaxInt64), x), nil
	}
	return AppendFloat64(AppendInt64(buf, int64(x)), x), nil
}
// AppendArray encodes an array into a sort-ordered binary representation:
// values separated by arrayValueDelim and terminated by arrayEnd.
func AppendArray(buf []byte, a document.Array) ([]byte, error) {
	err := a.Iterate(func(i int, value document.Value) error {
		var err error

		if i > 0 {
			buf = append(buf, arrayValueDelim)
		}

		buf, err = AppendValue(buf, value)
		if err != nil {
			return err
		}

		return nil
	})
	if err != nil {
		return nil, err
	}

	buf = append(buf, arrayEnd)
	return buf, nil
}
// decodeValue decodes the next value in data, whose first byte is a type tag,
// and reports how many bytes were consumed. delim and end are the bytes that
// terminate variable-length (blob/text) payloads in the enclosing container.
func decodeValue(data []byte, delim, end byte) (document.Value, int, error) {
	t := document.ValueType(data[0])
	i := 1

	switch t {
	case document.ArrayValue:
		a, n, err := decodeArray(data[i:])
		i += n
		if err != nil {
			return document.Value{}, i, err
		}
		return document.NewArrayValue(a), i, nil
	case document.DocumentValue:
		d, n, err := decodeDocument(data[i:])
		i += n
		if err != nil {
			return document.Value{}, i, err
		}
		return document.NewDocumentValue(d), i, nil
	case document.NullValue:
	case document.BoolValue:
		i++
	case document.DoubleValue:
		// Numbers occupy 16 bytes (see AppendNumber).
		i += 16
	case document.DurationValue:
		i += 8
	case document.BlobValue, document.TextValue:
		// Variable-length payload: scan forward to the container's delimiter
		// or terminator.
		for i < len(data) && data[i] != delim && data[i] != end {
			i++
		}
	default:
		return document.Value{}, 0, errors.New("invalid type character")
	}

	v, err := DecodeValue(data[:i])
	return v, i, err
}
// DecodeArray decodes an array encoded with AppendArray, discarding the
// consumed-byte count of the internal decoder.
func DecodeArray(data []byte) (document.Array, error) {
	a, _, err := decodeArray(data)
	return a, err
}
// decodeArray decodes an AppendArray-encoded payload and returns the array
// together with the number of bytes read, including the trailing arrayEnd.
// NOTE(review): the data[i] access after decodeValue assumes well-formed
// input that always carries a delimiter or end marker — confirm callers
// never pass truncated buffers.
func decodeArray(data []byte) (document.Array, int, error) {
	var vb document.ValueBuffer

	var readCount int
	for len(data) > 0 && data[0] != arrayEnd {
		v, i, err := decodeValue(data, arrayValueDelim, arrayEnd)
		if err != nil {
			return nil, i, err
		}

		vb = vb.Append(v)

		// skip the delimiter
		if data[i] == arrayValueDelim {
			i++
		}

		readCount += i

		data = data[i:]
	}

	// skip the array end character
	readCount++

	return vb, readCount, nil
}
// AppendDocument encodes a document into a sort-ordered binary representation:
// base64 field name, delimiter, value — pairs separated by documentValueDelim
// and terminated by documentEnd.
func AppendDocument(buf []byte, d document.Document) ([]byte, error) {
	var i int
	err := d.Iterate(func(field string, value document.Value) error {
		var err error

		if i > 0 {
			buf = append(buf, documentValueDelim)
		}

		buf, err = AppendBase64(buf, []byte(field))
		if err != nil {
			return err
		}

		buf = append(buf, documentValueDelim)

		buf, err = AppendValue(buf, value)
		if err != nil {
			return err
		}

		i++
		return nil
	})
	if err != nil {
		return nil, err
	}

	buf = append(buf, documentEnd)
	return buf, nil
}
// DecodeDocument decodes a document encoded with AppendDocument, discarding
// the consumed-byte count of the internal decoder.
func DecodeDocument(data []byte) (document.Document, error) {
	a, _, err := decodeDocument(data)
	return a, err
}
// decodeDocument decodes an AppendDocument-encoded payload into a
// FieldBuffer and returns it with the number of bytes read, including the
// trailing documentEnd byte.
func decodeDocument(data []byte) (document.Document, int, error) {
	var fb document.FieldBuffer

	var readCount int
	for len(data) > 0 && data[0] != documentEnd {
		i := 0

		// Scan the base64-encoded field name up to its delimiter.
		for i < len(data) && data[i] != documentValueDelim {
			i++
		}

		field, err := DecodeBase64(data[:i])
		if err != nil {
			return nil, 0, err
		}

		// skip the delimiter
		i++
		if i >= len(data) {
			return nil, 0, errors.New("invalid end of input")
		}
		readCount += i
		data = data[i:]

		v, i, err := decodeValue(data, documentValueDelim, documentEnd)
		if err != nil {
			return nil, i, err
		}

		fb.Add(string(field), v)

		// skip the delimiter
		if data[i] == documentValueDelim {
			i++
		}
		readCount += i
		data = data[i:]
	}

	// skip the document end character
	readCount++

	return &fb, readCount, nil
}
// AppendValue encodes a value as a key: a one-byte type tag followed by the
// type-specific payload. Integers are tagged as DoubleValue so that integers
// and doubles share one naturally-ordered 16-byte number encoding
// (see AppendNumber / DecodeValue).
func AppendValue(buf []byte, v document.Value) ([]byte, error) {
	if v.Type == document.IntegerValue || v.Type == document.DoubleValue {
		buf = append(buf, byte(document.DoubleValue))
	} else {
		buf = append(buf, byte(v.Type))
	}

	switch v.Type {
	case document.BlobValue:
		return AppendBase64(buf, v.V.([]byte))
	case document.TextValue:
		text := v.V.(string)
		return AppendBase64(buf, []byte(text))
	case document.BoolValue:
		return AppendBool(buf, v.V.(bool)), nil
	case document.IntegerValue, document.DoubleValue:
		return AppendNumber(buf, v)
	case document.DurationValue:
		return AppendInt64(buf, int64(v.V.(time.Duration))), nil
	case document.NullValue:
		return buf, nil
	case document.ArrayValue:
		return AppendArray(buf, v.V.(document.Array))
	case document.DocumentValue:
		return AppendDocument(buf, v.V.(document.Document))
	}

	return nil, errors.New("cannot encode type " + v.Type.String() + " as key")
}
// DecodeValue decodes data produced by AppendValue: the first byte is the
// type tag, the rest is the type-specific payload.
func DecodeValue(data []byte) (document.Value, error) {
	t := document.ValueType(data[0])
	data = data[1:]

	switch t {
	case document.BlobValue:
		t, err := DecodeBase64(data)
		if err != nil {
			return document.Value{}, err
		}
		return document.NewBlobValue(t), nil
	case document.TextValue:
		t, err := DecodeBase64(data)
		if err != nil {
			return document.Value{}, err
		}
		return document.NewTextValue(string(t)), nil
	case document.BoolValue:
		return document.NewBoolValue(DecodeBool(data)), nil
	case document.DoubleValue:
		// Numbers are 16 bytes (see AppendNumber): an all-zero trailing half
		// marks a value that was originally an integer.
		if bytes.Equal(data[8:], []byte{0, 0, 0, 0, 0, 0, 0, 0}) {
			x, err := DecodeInt64(data[:8])
			if err != nil {
				return document.Value{}, err
			}
			return document.NewIntegerValue(x), nil
		}
		x, err := DecodeFloat64(data[8:])
		if err != nil {
			return document.Value{}, err
		}
		return document.NewDoubleValue(x), nil
	case document.DurationValue:
		x, err := DecodeInt64(data)
		if err != nil {
			return document.Value{}, err
		}
		return document.NewDurationValue(time.Duration(x)), nil
	case document.NullValue:
		return document.NewNullValue(), nil
	case document.ArrayValue:
		a, err := DecodeArray(data)
		if err != nil {
			return document.Value{}, err
		}
		return document.NewArrayValue(a), nil
	case document.DocumentValue:
		d, err := DecodeDocument(data)
		if err != nil {
			return document.Value{}, err
		}
		return document.NewDocumentValue(d), nil
	}

	return document.Value{}, errors.New("unknown type")
}
// Append encodes a value of the type t as a key.
// The encoded key doesn't include type information, so the caller must pass
// the same t to Decode.
func Append(buf []byte, t document.ValueType, v interface{}) ([]byte, error) {
	switch t {
	case document.BlobValue:
		return append(buf, v.([]byte)...), nil
	case document.TextValue:
		return append(buf, v.(string)...), nil
	case document.BoolValue:
		return AppendBool(buf, v.(bool)), nil
	case document.IntegerValue:
		return AppendInt64(buf, v.(int64)), nil
	case document.DoubleValue:
		return AppendFloat64(buf, v.(float64)), nil
	case document.DurationValue:
		return AppendInt64(buf, int64(v.(time.Duration))), nil
	case document.NullValue:
		return buf, nil
	case document.ArrayValue:
		return AppendArray(buf, v.(document.Array))
	case document.DocumentValue:
		return AppendDocument(buf, v.(document.Document))
	}

	return nil, errors.New("cannot encode type " + t.String() + " as key")
}
// Decode takes some encoded data and decodes it to the target type t.
func Decode(t document.ValueType, data []byte) (document.Value, error) {
switch t {
case document.BlobValue:
return document.NewBlobValue(data), nil
case document.TextValue:
return document.NewTextValue(string(data)), nil
case document.BoolValue:
return document.NewBoolValue(DecodeBool(data)), nil
case document.IntegerValue:
x, err := DecodeInt64(data)
if err != nil {
return document.Value{}, err
}
return document.NewIntegerValue(x), nil
case document.DoubleValue:
x, err := DecodeFloat64(data)
if err != nil {
return document.Value{}, err
}
return document.NewDoubleValue(x), nil
case document.DurationValue:
x, err := DecodeInt64(data)
if err != nil {
return document.Value{}, err
}
return document.NewDurationValue(time.Duration(x)), nil
case document.NullValue:
return document.NewNullValue(), nil
case document.ArrayValue:
a, err := DecodeArray(data)
if err != nil {
return document.Value{}, err
}
return document.NewArrayValue(a), nil
case document.DocumentValue:
d, err := DecodeDocument(data)
if err != nil {
return document.Value{}, err
}
return document.NewDocumentValue(d), nil
}
return document.Value{}, errors.New("unknown type")
} | key/encoding.go | 0.689201 | 0.424651 | encoding.go | starcoder |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.