2022-10-09 15:23:52 +02:00
|
|
|
package cache
|
|
|
|
|
|
|
|
|
|
import (
	"fmt"
	"log/slog"
	"slices"
	"strings"
	"sync"
	"time"

	"github.com/sparetimecoders/goamqp"

	"gitlab.com/unboundsoftware/eventsourced/eventsourced"

	"gitea.unbound.se/unboundsoftware/schemas/domain"

	"gitea.unbound.se/unboundsoftware/schemas/hash"
)
|
|
|
|
|
|
|
|
|
|
// Cache is an in-memory read model of organizations, users, API keys and
// sub-graphs. It is populated and kept current by applying domain events and
// aggregate snapshots in Update, and all lookups take a read lock.
type Cache struct {
	// mu guards every map below; Update takes it for writing, lookups for reading.
	mu sync.RWMutex
	// organizations maps organization id -> organization aggregate.
	organizations map[string]domain.Organization
	// users maps a user subject -> ids of the organizations the user belongs to.
	users map[string][]string
	apiKeys map[string]domain.APIKey // keyed by organizationId-name
	// services maps orgId -> ref -> set of sub-graph ids (see updateSubGraph).
	services map[string]map[string]map[string]struct{}
	// subGraphs maps subGraphKey(orgId, ref, service) -> sub-graph id.
	subGraphs map[string]string
	// lastUpdate maps refKey(orgId, ref) -> RFC3339Nano timestamp of the last change.
	lastUpdate map[string]string
	logger *slog.Logger
}
|
|
|
|
|
|
|
|
|
|
func (c *Cache) OrganizationByAPIKey(apiKey string) *domain.Organization {
|
2025-11-21 10:21:08 +01:00
|
|
|
c.mu.RLock()
|
|
|
|
|
defer c.mu.RUnlock()
|
|
|
|
|
|
2025-11-20 22:11:17 +01:00
|
|
|
// Find the API key by comparing hashes
|
|
|
|
|
for _, key := range c.apiKeys {
|
|
|
|
|
if hash.CompareAPIKey(key.Key, apiKey) {
|
|
|
|
|
org, exists := c.organizations[key.OrganizationId]
|
|
|
|
|
if !exists {
|
|
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
return &org
|
|
|
|
|
}
|
2023-04-27 07:09:10 +02:00
|
|
|
}
|
2025-11-20 22:11:17 +01:00
|
|
|
return nil
|
2023-04-27 07:09:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func (c *Cache) OrganizationsByUser(sub string) []domain.Organization {
|
2025-11-21 10:21:08 +01:00
|
|
|
c.mu.RLock()
|
|
|
|
|
defer c.mu.RUnlock()
|
|
|
|
|
|
2023-04-27 07:09:10 +02:00
|
|
|
orgIds := c.users[sub]
|
|
|
|
|
orgs := make([]domain.Organization, len(orgIds))
|
|
|
|
|
for i, id := range orgIds {
|
|
|
|
|
orgs[i] = c.organizations[id]
|
|
|
|
|
}
|
|
|
|
|
return orgs
|
|
|
|
|
}
|
|
|
|
|
|
2025-11-22 18:37:07 +01:00
|
|
|
func (c *Cache) AllOrganizations() []domain.Organization {
|
|
|
|
|
c.mu.RLock()
|
|
|
|
|
defer c.mu.RUnlock()
|
|
|
|
|
|
|
|
|
|
orgs := make([]domain.Organization, 0, len(c.organizations))
|
|
|
|
|
for _, org := range c.organizations {
|
|
|
|
|
orgs = append(orgs, org)
|
|
|
|
|
}
|
|
|
|
|
return orgs
|
|
|
|
|
}
|
|
|
|
|
|
2023-04-27 07:09:10 +02:00
|
|
|
func (c *Cache) ApiKeyByKey(key string) *domain.APIKey {
|
2025-11-21 10:21:08 +01:00
|
|
|
c.mu.RLock()
|
|
|
|
|
defer c.mu.RUnlock()
|
|
|
|
|
|
2025-11-20 22:11:17 +01:00
|
|
|
// Find the API key by comparing hashes
|
|
|
|
|
for _, apiKey := range c.apiKeys {
|
|
|
|
|
if hash.CompareAPIKey(apiKey.Key, key) {
|
|
|
|
|
return &apiKey
|
|
|
|
|
}
|
2023-04-27 07:09:10 +02:00
|
|
|
}
|
2025-11-20 22:11:17 +01:00
|
|
|
return nil
|
2022-10-09 15:23:52 +02:00
|
|
|
}
|
|
|
|
|
|
2023-04-27 07:09:10 +02:00
|
|
|
func (c *Cache) Services(orgId, ref, lastUpdate string) ([]string, string) {
|
2025-11-21 10:21:08 +01:00
|
|
|
c.mu.RLock()
|
|
|
|
|
defer c.mu.RUnlock()
|
|
|
|
|
|
2023-04-27 07:09:10 +02:00
|
|
|
key := refKey(orgId, ref)
|
2022-10-09 15:23:52 +02:00
|
|
|
var services []string
|
2023-04-27 07:09:10 +02:00
|
|
|
if lastUpdate == "" || c.lastUpdate[key] > lastUpdate {
|
|
|
|
|
for k := range c.services[orgId][ref] {
|
2022-10-14 22:41:56 +02:00
|
|
|
services = append(services, k)
|
|
|
|
|
}
|
2022-10-09 15:23:52 +02:00
|
|
|
}
|
2023-04-27 07:09:10 +02:00
|
|
|
return services, c.lastUpdate[key]
|
2022-10-09 15:23:52 +02:00
|
|
|
}
|
|
|
|
|
|
2023-04-27 07:09:10 +02:00
|
|
|
func (c *Cache) SubGraphId(orgId, ref, service string) string {
|
2025-11-21 10:21:08 +01:00
|
|
|
c.mu.RLock()
|
|
|
|
|
defer c.mu.RUnlock()
|
|
|
|
|
|
2023-04-27 07:09:10 +02:00
|
|
|
return c.subGraphs[subGraphKey(orgId, ref, service)]
|
2022-10-09 15:23:52 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func (c *Cache) Update(msg any, _ goamqp.Headers) (any, error) {
|
2025-11-21 10:21:08 +01:00
|
|
|
c.mu.Lock()
|
|
|
|
|
defer c.mu.Unlock()
|
|
|
|
|
|
2022-10-09 15:23:52 +02:00
|
|
|
switch m := msg.(type) {
|
2023-04-27 07:09:10 +02:00
|
|
|
case *domain.OrganizationAdded:
|
2025-11-21 10:21:08 +01:00
|
|
|
o := domain.Organization{
|
|
|
|
|
BaseAggregate: eventsourced.BaseAggregateFromString(m.ID.String()),
|
|
|
|
|
}
|
2023-04-27 07:09:10 +02:00
|
|
|
m.UpdateOrganization(&o)
|
|
|
|
|
c.organizations[m.ID.String()] = o
|
|
|
|
|
c.addUser(m.Initiator, o)
|
2025-11-21 10:21:08 +01:00
|
|
|
c.logger.With("org_id", m.ID.String(), "event", "OrganizationAdded").Debug("cache updated")
|
2025-11-22 18:37:07 +01:00
|
|
|
case *domain.UserAddedToOrganization:
|
|
|
|
|
org, exists := c.organizations[m.ID.String()]
|
|
|
|
|
if exists {
|
|
|
|
|
m.UpdateOrganization(&org)
|
|
|
|
|
c.organizations[m.ID.String()] = org
|
|
|
|
|
c.addUser(m.UserId, org)
|
|
|
|
|
c.logger.With("org_id", m.ID.String(), "user_id", m.UserId, "event", "UserAddedToOrganization").Debug("cache updated")
|
|
|
|
|
} else {
|
|
|
|
|
c.logger.With("org_id", m.ID.String(), "event", "UserAddedToOrganization").Warn("organization not found in cache")
|
|
|
|
|
}
|
2023-04-27 07:09:10 +02:00
|
|
|
case *domain.APIKeyAdded:
|
|
|
|
|
key := domain.APIKey{
|
|
|
|
|
Name: m.Name,
|
|
|
|
|
OrganizationId: m.OrganizationId,
|
2025-11-20 22:11:17 +01:00
|
|
|
Key: m.Key, // This is now the hashed key
|
2023-04-27 07:09:10 +02:00
|
|
|
Refs: m.Refs,
|
|
|
|
|
Read: m.Read,
|
|
|
|
|
Publish: m.Publish,
|
|
|
|
|
CreatedBy: m.Initiator,
|
|
|
|
|
CreatedAt: m.When(),
|
|
|
|
|
}
|
2025-11-20 22:11:17 +01:00
|
|
|
// Use composite key: organizationId-name
|
|
|
|
|
c.apiKeys[apiKeyId(m.OrganizationId, m.Name)] = key
|
2023-04-27 07:09:10 +02:00
|
|
|
org := c.organizations[m.OrganizationId]
|
|
|
|
|
org.APIKeys = append(org.APIKeys, key)
|
|
|
|
|
c.organizations[m.OrganizationId] = org
|
2025-11-21 10:21:08 +01:00
|
|
|
c.logger.With("org_id", m.OrganizationId, "key_name", m.Name, "event", "APIKeyAdded").Debug("cache updated")
|
2025-11-22 18:37:07 +01:00
|
|
|
case *domain.APIKeyRemoved:
|
|
|
|
|
orgId := m.ID.String()
|
|
|
|
|
org, exists := c.organizations[orgId]
|
|
|
|
|
if exists {
|
|
|
|
|
// Remove from organization's API keys list
|
|
|
|
|
for i, key := range org.APIKeys {
|
|
|
|
|
if key.Name == m.KeyName {
|
|
|
|
|
org.APIKeys = append(org.APIKeys[:i], org.APIKeys[i+1:]...)
|
|
|
|
|
break
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
c.organizations[orgId] = org
|
|
|
|
|
// Remove from apiKeys map
|
|
|
|
|
delete(c.apiKeys, apiKeyId(orgId, m.KeyName))
|
|
|
|
|
c.logger.With("org_id", orgId, "key_name", m.KeyName, "event", "APIKeyRemoved").Debug("cache updated")
|
|
|
|
|
} else {
|
|
|
|
|
c.logger.With("org_id", orgId, "event", "APIKeyRemoved").Warn("organization not found in cache")
|
|
|
|
|
}
|
|
|
|
|
case *domain.OrganizationRemoved:
|
|
|
|
|
orgId := m.ID.String()
|
|
|
|
|
org, exists := c.organizations[orgId]
|
|
|
|
|
if exists {
|
|
|
|
|
// Remove all API keys for this organization
|
|
|
|
|
for _, key := range org.APIKeys {
|
|
|
|
|
delete(c.apiKeys, apiKeyId(orgId, key.Name))
|
|
|
|
|
}
|
|
|
|
|
// Remove organization from all users
|
|
|
|
|
for userId, userOrgs := range c.users {
|
|
|
|
|
for i, userOrgId := range userOrgs {
|
|
|
|
|
if userOrgId == orgId {
|
|
|
|
|
c.users[userId] = append(userOrgs[:i], userOrgs[i+1:]...)
|
|
|
|
|
break
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
// If user has no more organizations, remove from map
|
|
|
|
|
if len(c.users[userId]) == 0 {
|
|
|
|
|
delete(c.users, userId)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
// Remove services for this organization
|
|
|
|
|
if refs, exists := c.services[orgId]; exists {
|
|
|
|
|
for ref := range refs {
|
|
|
|
|
// Remove all subgraphs for this org/ref combination
|
|
|
|
|
for service := range refs[ref] {
|
|
|
|
|
delete(c.subGraphs, subGraphKey(orgId, ref, service))
|
|
|
|
|
}
|
|
|
|
|
// Remove lastUpdate for this org/ref
|
|
|
|
|
delete(c.lastUpdate, refKey(orgId, ref))
|
|
|
|
|
}
|
|
|
|
|
delete(c.services, orgId)
|
|
|
|
|
}
|
|
|
|
|
// Remove organization
|
|
|
|
|
delete(c.organizations, orgId)
|
|
|
|
|
c.logger.With("org_id", orgId, "event", "OrganizationRemoved").Debug("cache updated")
|
|
|
|
|
} else {
|
|
|
|
|
c.logger.With("org_id", orgId, "event", "OrganizationRemoved").Warn("organization not found in cache")
|
|
|
|
|
}
|
2022-10-09 15:23:52 +02:00
|
|
|
case *domain.SubGraphUpdated:
|
2023-04-27 07:09:10 +02:00
|
|
|
c.updateSubGraph(m.OrganizationId, m.Ref, m.ID.String(), m.Service, m.Time)
|
2025-11-21 10:21:08 +01:00
|
|
|
c.logger.With("org_id", m.OrganizationId, "ref", m.Ref, "service", m.Service, "event", "SubGraphUpdated").Debug("cache updated")
|
2023-04-27 07:09:10 +02:00
|
|
|
case *domain.Organization:
|
|
|
|
|
c.organizations[m.ID.String()] = *m
|
|
|
|
|
c.addUser(m.CreatedBy, *m)
|
|
|
|
|
for _, k := range m.APIKeys {
|
2025-11-20 22:11:17 +01:00
|
|
|
// Use composite key: organizationId-name
|
|
|
|
|
c.apiKeys[apiKeyId(k.OrganizationId, k.Name)] = k
|
2022-10-09 15:23:52 +02:00
|
|
|
}
|
2025-11-21 10:21:08 +01:00
|
|
|
c.logger.With("org_id", m.ID.String(), "event", "Organization aggregate loaded").Debug("cache updated")
|
2022-10-09 15:23:52 +02:00
|
|
|
case *domain.SubGraph:
|
2023-04-27 07:09:10 +02:00
|
|
|
c.updateSubGraph(m.OrganizationId, m.Ref, m.ID.String(), m.Service, m.ChangedAt)
|
2025-11-21 10:21:08 +01:00
|
|
|
c.logger.With("org_id", m.OrganizationId, "ref", m.Ref, "service", m.Service, "event", "SubGraph aggregate loaded").Debug("cache updated")
|
2022-10-09 15:23:52 +02:00
|
|
|
default:
|
2025-04-12 10:43:40 +02:00
|
|
|
c.logger.With("msg", msg).Warn("unexpected message received")
|
2022-10-09 15:23:52 +02:00
|
|
|
}
|
|
|
|
|
return nil, nil
|
|
|
|
|
}
|
|
|
|
|
|
2023-04-27 07:09:10 +02:00
|
|
|
func (c *Cache) updateSubGraph(orgId string, ref string, subGraphId string, service string, updated time.Time) {
|
|
|
|
|
if _, exists := c.services[orgId]; !exists {
|
|
|
|
|
c.services[orgId] = make(map[string]map[string]struct{})
|
|
|
|
|
}
|
|
|
|
|
if _, exists := c.services[orgId][ref]; !exists {
|
|
|
|
|
c.services[orgId][ref] = make(map[string]struct{})
|
|
|
|
|
}
|
|
|
|
|
c.services[orgId][ref][subGraphId] = struct{}{}
|
|
|
|
|
c.subGraphs[subGraphKey(orgId, ref, service)] = subGraphId
|
|
|
|
|
c.lastUpdate[refKey(orgId, ref)] = updated.Format(time.RFC3339Nano)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
func (c *Cache) addUser(sub string, organization domain.Organization) {
|
|
|
|
|
user, exists := c.users[sub]
|
2025-11-21 10:21:08 +01:00
|
|
|
orgId := organization.ID.String()
|
2023-04-27 07:09:10 +02:00
|
|
|
if !exists {
|
2025-11-21 10:21:08 +01:00
|
|
|
c.users[sub] = []string{orgId}
|
|
|
|
|
return
|
2023-04-27 07:09:10 +02:00
|
|
|
}
|
2025-11-21 10:21:08 +01:00
|
|
|
|
|
|
|
|
// Check if organization already exists for this user
|
|
|
|
|
for _, id := range user {
|
|
|
|
|
if id == orgId {
|
|
|
|
|
return // Already exists, no need to add
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
c.users[sub] = append(user, orgId)
|
2023-04-27 07:09:10 +02:00
|
|
|
}
|
|
|
|
|
|
2025-04-12 10:43:40 +02:00
|
|
|
func New(logger *slog.Logger) *Cache {
|
2022-10-09 15:23:52 +02:00
|
|
|
return &Cache{
|
2023-04-27 07:09:10 +02:00
|
|
|
organizations: make(map[string]domain.Organization),
|
|
|
|
|
users: make(map[string][]string),
|
|
|
|
|
apiKeys: make(map[string]domain.APIKey),
|
|
|
|
|
services: make(map[string]map[string]map[string]struct{}),
|
|
|
|
|
subGraphs: make(map[string]string),
|
|
|
|
|
lastUpdate: make(map[string]string),
|
|
|
|
|
logger: logger,
|
2022-10-09 15:23:52 +02:00
|
|
|
}
|
|
|
|
|
}
|
2023-04-27 07:09:10 +02:00
|
|
|
|
|
|
|
|
// refKey builds the composite map key used for per-(organization, ref)
// entries in lastUpdate.
func refKey(orgId, ref string) string {
	return fmt.Sprintf("%s<->%s", orgId, ref)
}
|
|
|
|
|
|
|
|
|
|
// subGraphKey builds the composite map key identifying a service's
// sub-graph within an organization and ref.
func subGraphKey(orgId, ref, service string) string {
	return fmt.Sprintf("%s<->%s<->%s", orgId, ref, service)
}
|
2025-11-20 22:11:17 +01:00
|
|
|
|
|
|
|
|
// apiKeyId builds the composite apiKeys map key from an organization id and
// a key name.
func apiKeyId(orgId string, name string) string {
	return orgId + "<->" + name
}
|