fix: new implementation

changes:
- Remove cli
  Some CLI commands are not completely tested and are deprecated.

- Daemon
  - The old version had a very poor implementation for verifying whether
    the device or the sensors already exist in the database before
    inserting. The current implementation can still be improved, but it
    is better than the old one.
  - Completely remove the cache store implementation. Use a plain array
    and query its length and capacity to determine when the array cache
    must be cleaned.

- Type
  Remove unused types and functions
This commit is contained in:
2020-05-03 14:04:08 +02:00
parent 84d052184e
commit fb8d4dd5eb
137 changed files with 658 additions and 14048 deletions

View File

@ -1,35 +0,0 @@
package db
import (
"database/sql"
"errors"
"net/url"
_ "github.com/lib/pq"
"github.com/volker-raschek/go-logger/pkg/logger"
)
var (
	// errorUnsupportedDatabase is returned by New when the storage endpoint
	// URL uses a scheme other than "postgres".
	errorUnsupportedDatabase = errors.New("Unsupported database scheme")
	// flogger is the package-wide logger. Silent by default; replaceable
	// via SetLogger.
	flogger = logger.NewSilentLogger()
)
// New creates a Database implementation for the given storage endpoint.
// Only the "postgres" scheme is supported; any other scheme yields
// errorUnsupportedDatabase.
func New(storageEndpoint *url.URL) (Database, error) {
	if storageEndpoint.Scheme != "postgres" {
		return nil, errorUnsupportedDatabase
	}
	dbo, err := sql.Open(storageEndpoint.Scheme, storageEndpoint.String())
	if err != nil {
		return nil, err
	}
	return &Postgres{dbo: dbo}, nil
}
// SetLogger overrides the default silent package logger with the given one.
func SetLogger(l logger.Logger) {
	flogger = l
}

View File

@ -1,38 +0,0 @@
package db
import (
"net/url"
"testing"
"github.com/stretchr/testify/require"
)
// TestNew verifies that New accepts postgres endpoints and rejects every
// other URL scheme with errorUnsupportedDatabase.
func TestNew(t *testing.T) {
	require := require.New(t)

	supported := []string{
		"postgres://flucky:flucky@markus-pc.trier.cryptic.systems/postgres?sslmode=disable",
	}
	unsupported := []string{
		"html://flucky.cryptic.systems",
		"oracle://flucky:flucky@example.com/postgres",
	}

	for _, endpoint := range supported {
		u, err := url.Parse(endpoint)
		require.Nil(err)

		dbo, err := New(u)
		require.Nil(err)
		require.Nil(dbo.Close())
	}

	for _, endpoint := range unsupported {
		u, err := url.Parse(endpoint)
		require.Nil(err)

		_, err = New(u)
		require.Equal(errorUnsupportedDatabase, err)
	}
}

View File

@ -1,18 +0,0 @@
package db
import (
"errors"
)
var (
	// Sentinel errors used as message prefixes throughout the db package.
	// Per Go convention (staticcheck ST1005) error strings are lowercase
	// and unpunctuated, since they are usually wrapped into larger messages.
	errorBeginTransaction         = errors.New("failed to start new transaction")
	errorGetAsset                 = errors.New("failed to get asset from go-bindata")
	errorNoRowsAffected           = errors.New("no rows affected")
	errorRowNotFound              = errors.New("failed to find row by given ID")
	errorPrepareStatement         = errors.New("failed to prepare sql statement")
	errorRollbackTransaction      = errors.New("failed to rollback transaction")
	errorScanRow                  = errors.New("failed to scan row")
	errorStatementExecute         = errors.New("failed to execute statement")
	errorStatementQuery           = errors.New("failed to query statement")
	errorUnknownMeasuredValueType = errors.New("unknown measured value type")
)

View File

@ -1,48 +0,0 @@
package db
import (
"context"
"github.com/Masterminds/semver"
"github.com/volker-raschek/flucky/pkg/types"
)
// Database abstracts the persistence backend used by flucky. The only
// implementation in this package is Postgres; New selects it by URL scheme.
type Database interface {
	// Close closes the database connection.
	Close() error

	// Schema creates or migrates the database schema to the given version.
	// Normally the version is the same as the flucky binary version.
	Schema(ctx context.Context, version *semver.Version) error

	// Delete operations remove the given rows. Device and sensor deletes
	// cascade to dependent rows.
	DeleteDevices(ctx context.Context, devices []*types.Device) error
	DeleteInfo(ctx context.Context, key string) error
	DeleteMeasuredValues(ctx context.Context, measuredValues []*types.MeasuredValue) error
	DeleteSensors(ctx context.Context, sensors []*types.Sensor) error

	// Insert operations store new rows.
	InsertDevices(ctx context.Context, devices []*types.Device) error
	InsertInfo(ctx context.Context, key string, value string) error
	InsertMeasuredValues(ctx context.Context, measuredValues []*types.MeasuredValue) error
	InsertSensors(ctx context.Context, sensors []*types.Sensor) error

	// Select operations query stored rows.
	SelectDeviceByID(ctx context.Context, id string) (*types.Device, error)
	SelectHumidities(ctx context.Context) ([]*types.MeasuredValue, error)
	SelectHumidityByID(ctx context.Context, id string) (*types.MeasuredValue, error)
	SelectInfo(ctx context.Context, key string) (string, error)
	SelectMeasuredValues(ctx context.Context) ([]*types.MeasuredValue, error)
	SelectMeasuredValuesByIDAndType(ctx context.Context, id string, valueType types.MeasuredValueType) (*types.MeasuredValue, error)
	SelectPressures(ctx context.Context) ([]*types.MeasuredValue, error)
	SelectPressureByID(ctx context.Context, id string) (*types.MeasuredValue, error)
	SelectSensorByID(ctx context.Context, id string) (*types.Sensor, error)
	SelectTemperatures(ctx context.Context) ([]*types.MeasuredValue, error)
	SelectTemperatureByID(ctx context.Context, id string) (*types.MeasuredValue, error)

	// Update operations overwrite existing rows. Parameter name fixed from
	// the previous misspelling "sensots".
	UpdateDevices(ctx context.Context, devices []*types.Device) error
	UpdateInfo(ctx context.Context, key string, value string) error
	UpdateMeasuredValues(ctx context.Context, measuredValues []*types.MeasuredValue) error
	UpdateSensors(ctx context.Context, sensors []*types.Sensor) error
}

View File

@ -1,713 +0,0 @@
package db
import (
"context"
"database/sql"
"fmt"
"path/filepath"
"sort"
"strings"
"github.com/Masterminds/semver"
"github.com/volker-raschek/flucky/pkg/types"
// PostgreSQL lib
_ "github.com/lib/pq"
)
var (
	// postgresAssetPath is the go-bindata prefix under which all PostgreSQL
	// statement and schema assets are stored.
	postgresAssetPath = "pkg/storage/db/sql/psql"
)
// Postgres provide functions to interact with a postgres database
type Postgres struct {
	// dbo is the underlying connection pool opened via database/sql.
	dbo *sql.DB
}
// Close releases the underlying database connection pool.
func (p *Postgres) Close() error {
	err := p.dbo.Close()
	return err
}
// Schema creates or updates the database schema to the given version.
// Normally the version is the same as the flucky binary version. Schema
// change files are shipped as go-bindata assets named "<semver>.sql"; every
// file newer than the version stored in the database is applied in
// ascending semver order, and the "version" info key is kept up to date.
func (p *Postgres) Schema(ctx context.Context, version *semver.Version) error {
	schemaFunc := func(ctx context.Context, fromVersion *semver.Version, toVersion *semver.Version) error {
		assetPath := fmt.Sprintf("%v/schema", postgresAssetPath)
		sqlAssetFiles, err := AssetDir(assetPath)
		if err != nil {
			return fmt.Errorf("Can not restore asset directory %v: %v", assetPath, err)
		}
		// Map every schema file to its semantic version and collect the
		// versions for sorting. Pre-size both containers.
		postgreSQLVersionChanges := make(map[*semver.Version]string, len(sqlAssetFiles))
		postgreSQLVersions := make([]*semver.Version, len(sqlAssetFiles))
		for i, sqlAssetFile := range sqlAssetFiles {
			fileSemVersion, err := semver.NewVersion(strings.ReplaceAll(sqlAssetFile, ".sql", ""))
			if err != nil {
				return fmt.Errorf("Can not create semantic version from file asset %v: %v", sqlAssetFile, err)
			}
			postgreSQLVersionChanges[fileSemVersion] = sqlAssetFile
			postgreSQLVersions[i] = fileSemVersion
		}
		sort.Sort(semver.Collection(postgreSQLVersions))
		for i, postgreSQLVersion := range postgreSQLVersions {
			// Skip change files that are already part of the stored schema.
			if fromVersion != nil && (postgreSQLVersion.LessThan(fromVersion) || postgreSQLVersion.Equal(fromVersion)) {
				flogger.Debug("SKIP: PostgreSQL schema version '%v' is less or equal then the local version changes '%v'", postgreSQLVersion.String(), fromVersion.String())
				continue
			}
			asset := postgreSQLVersionChanges[postgreSQLVersion]
			queryBytes, err := Asset(filepath.Join(assetPath, asset))
			if err != nil {
				return fmt.Errorf("Can not restore asset %v, %v", asset, err)
			}
			if _, err := p.dbo.ExecContext(ctx, string(queryBytes)); err != nil {
				return fmt.Errorf("%v: %v", errorStatementExecute, err)
			}
			// The first applied change file initializes the version row;
			// every later one updates it.
			if i == 0 {
				if err := p.InsertInfo(ctx, "version", postgreSQLVersion.String()); err != nil {
					return fmt.Errorf("Can not insert version %v into info table: %v", postgreSQLVersion.String(), err)
				}
			} else {
				if err := p.UpdateInfo(ctx, "version", postgreSQLVersion.String()); err != nil {
					return fmt.Errorf("Can not update version %v into info table: %v", postgreSQLVersion.String(), err)
				}
			}
		}
		return nil
	}

	dbVersion, err := p.SelectInfo(ctx, "version")
	if err != nil {
		// Can not select a version from the database; presumably the schema
		// is not initialized yet. Create it from scratch.
		return schemaFunc(ctx, nil, version)
	}
	fromVersion, err := semver.NewVersion(dbVersion)
	if err != nil {
		return fmt.Errorf("Can not create semantic version from database entry %v: %v", dbVersion, err)
	}
	return schemaFunc(ctx, fromVersion, version)
}
// DeleteDevices removes every given device from the database; the delete
// cascades on the database side to the device's sensors and their measured
// values.
func (p *Postgres) DeleteDevices(ctx context.Context, devices []*types.Device) error {
	queryBytes, err := Asset(fmt.Sprintf("%v/deleteDevice.sql", postgresAssetPath))
	if err != nil {
		return fmt.Errorf("%v: %v", errorGetAsset, err)
	}
	stmt, err := p.dbo.PrepareContext(ctx, string(queryBytes))
	if err != nil {
		return fmt.Errorf("%v: %v", errorPrepareStatement, err)
	}
	defer stmt.Close()
	for _, device := range devices {
		if _, err := stmt.ExecContext(ctx, &device.ID); err != nil {
			return fmt.Errorf("%v: %v", errorStatementExecute, err)
		}
	}
	return nil
}
// DeleteSensors removes every given sensor from the database; the delete
// cascades on the database side to all of the sensor's measured values.
func (p *Postgres) DeleteSensors(ctx context.Context, sensors []*types.Sensor) error {
	queryBytes, err := Asset(fmt.Sprintf("%v/deleteSensor.sql", postgresAssetPath))
	if err != nil {
		return fmt.Errorf("%v: %v", errorGetAsset, err)
	}
	stmt, err := p.dbo.PrepareContext(ctx, string(queryBytes))
	if err != nil {
		return fmt.Errorf("%v: %v", errorPrepareStatement, err)
	}
	defer stmt.Close()
	for _, sensor := range sensors {
		if _, err := stmt.ExecContext(ctx, &sensor.ID); err != nil {
			return fmt.Errorf("%v: %v", errorStatementExecute, err)
		}
	}
	return nil
}
// DeleteInfo removes the key and its associated value from the info table.
func (p *Postgres) DeleteInfo(ctx context.Context, key string) error {
	queryBytes, err := Asset(fmt.Sprintf("%v/deleteInfo.sql", postgresAssetPath))
	if err != nil {
		return fmt.Errorf("%v: %v", errorGetAsset, err)
	}
	stmt, err := p.dbo.PrepareContext(ctx, string(queryBytes))
	if err != nil {
		return fmt.Errorf("%v: %v", errorPrepareStatement, err)
	}
	defer stmt.Close()
	if _, err := stmt.ExecContext(ctx, &key); err != nil {
		return fmt.Errorf("%v: %v", errorStatementExecute, err)
	}
	return nil
}
// DeleteMeasuredValues deletes all specified measured values. The values are
// grouped by type first so that one prepared statement per type is reused
// for every ID of that type. Values of unknown types are silently skipped,
// exactly as before.
func (p *Postgres) DeleteMeasuredValues(ctx context.Context, measuredValues []*types.MeasuredValue) error {
	// deleteMeasuredValue runs the prepared delete for each value's ID.
	deleteMeasuredValue := func(ctx context.Context, query string, measuredValues []*types.MeasuredValue) error {
		stmt, err := p.dbo.PrepareContext(ctx, query)
		if err != nil {
			return fmt.Errorf("%v: %v", errorPrepareStatement, err)
		}
		defer stmt.Close()
		for _, measuredValue := range measuredValues {
			if _, err := stmt.ExecContext(ctx, &measuredValue.ID); err != nil {
				return fmt.Errorf("%v: %v", errorStatementExecute, err)
			}
		}
		return nil
	}

	// Group the measured values by their value type.
	sortedMeasuredValueTypes := make(map[types.MeasuredValueType][]*types.MeasuredValue)
	for _, measuredValue := range measuredValues {
		sortedMeasuredValueTypes[measuredValue.ValueType] = append(sortedMeasuredValueTypes[measuredValue.ValueType], measuredValue)
	}

	// One delete statement asset per supported measured value type. This
	// replaces the previous copy-pasted three-branch switch.
	assetsByType := map[types.MeasuredValueType]string{
		types.MeasuredValueTypeHumidity:    fmt.Sprintf("%v/deleteHumidity.sql", postgresAssetPath),
		types.MeasuredValueTypePressure:    fmt.Sprintf("%v/deletePressure.sql", postgresAssetPath),
		types.MeasuredValueTypeTemperature: fmt.Sprintf("%v/deleteTemperature.sql", postgresAssetPath),
	}

	for measuredValueType, sortedMeasuredValues := range sortedMeasuredValueTypes {
		queryFile, ok := assetsByType[measuredValueType]
		if !ok {
			continue // unknown type: ignored, as in the original switch
		}
		queryBytes, err := Asset(queryFile)
		if err != nil {
			return fmt.Errorf("%v: %v", errorGetAsset, err)
		}
		if err := deleteMeasuredValue(ctx, string(queryBytes), sortedMeasuredValues); err != nil {
			return err
		}
	}
	return nil
}
// InsertDevices stores every given device in the database.
func (p *Postgres) InsertDevices(ctx context.Context, devices []*types.Device) error {
	queryBytes, err := Asset(fmt.Sprintf("%v/insertDevice.sql", postgresAssetPath))
	if err != nil {
		return fmt.Errorf("%v: %v", errorGetAsset, err)
	}
	stmt, err := p.dbo.PrepareContext(ctx, string(queryBytes))
	if err != nil {
		return fmt.Errorf("%v: %v", errorPrepareStatement, err)
	}
	defer stmt.Close()
	for _, device := range devices {
		if _, err := stmt.ExecContext(ctx, &device.ID, &device.Name, &device.Location, &device.CreationDate); err != nil {
			return fmt.Errorf("%v: %v", errorStatementExecute, err)
		}
	}
	return nil
}
// InsertInfo stores an additional key/value pair in the info table.
func (p *Postgres) InsertInfo(ctx context.Context, key string, value string) error {
	queryBytes, err := Asset(fmt.Sprintf("%v/insertInfo.sql", postgresAssetPath))
	if err != nil {
		return fmt.Errorf("%v: %v", errorGetAsset, err)
	}
	stmt, err := p.dbo.PrepareContext(ctx, string(queryBytes))
	if err != nil {
		return fmt.Errorf("%v: %v", errorPrepareStatement, err)
	}
	defer stmt.Close()
	if _, err := stmt.ExecContext(ctx, key, value); err != nil {
		return fmt.Errorf("%v: %v", errorStatementExecute, err)
	}
	return nil
}
// InsertMeasuredValues stores all given measured values, dispatching each
// group of values to its type-specific insert helper.
func (p *Postgres) InsertMeasuredValues(ctx context.Context, measuredValues []*types.MeasuredValue) error {
	// Group values by type so each helper receives only its own kind.
	grouped := make(map[types.MeasuredValueType][]*types.MeasuredValue)
	for _, measuredValue := range measuredValues {
		grouped[measuredValue.ValueType] = append(grouped[measuredValue.ValueType], measuredValue)
	}
	for valueType, values := range grouped {
		var err error
		switch valueType {
		case types.MeasuredValueTypeHumidity:
			err = p.insertHumidity(ctx, values)
		case types.MeasuredValueTypePressure:
			err = p.insertPressure(ctx, values)
		case types.MeasuredValueTypeTemperature:
			err = p.insertTemperature(ctx, values)
		}
		if err != nil {
			return err
		}
	}
	return nil
}
// insertHumidity stores humidity measured values via the insertHumidity.sql
// asset. Values of any other type are skipped defensively.
func (p *Postgres) insertHumidity(ctx context.Context, measuredValues []*types.MeasuredValue) error {
	asset := fmt.Sprintf("%v/insertHumidity.sql", postgresAssetPath)
	queryBytes, err := Asset(asset)
	if err != nil {
		return fmt.Errorf("%v: %v", errorGetAsset, err)
	}
	query := string(queryBytes)
	stmt, err := p.dbo.PrepareContext(ctx, query)
	if err != nil {
		return fmt.Errorf("%v: %v", errorPrepareStatement, err)
	}
	defer stmt.Close()
	for _, measuredValue := range measuredValues {
		if measuredValue.ValueType != types.MeasuredValueTypeHumidity {
			continue
		}
		_, err := stmt.ExecContext(ctx, &measuredValue.ID, &measuredValue.Value, &measuredValue.FromDate, &measuredValue.TillDate, &measuredValue.SensorID, &measuredValue.CreationDate, &measuredValue.UpdateDate)
		if err != nil {
			// Include the measured value ID for consistency with
			// insertPressure and insertTemperature.
			return fmt.Errorf("%v: Measured value id %v: %v", errorStatementExecute, measuredValue.ID, err)
		}
	}
	return nil
}
// insertPressure stores pressure measured values via the insertPressure.sql
// asset. Values of any other type are skipped defensively.
func (p *Postgres) insertPressure(ctx context.Context, measuredValues []*types.MeasuredValue) error {
	queryBytes, err := Asset(fmt.Sprintf("%v/insertPressure.sql", postgresAssetPath))
	if err != nil {
		return fmt.Errorf("%v: %v", errorGetAsset, err)
	}
	stmt, err := p.dbo.PrepareContext(ctx, string(queryBytes))
	if err != nil {
		return fmt.Errorf("%v: %v", errorPrepareStatement, err)
	}
	defer stmt.Close()
	for _, mv := range measuredValues {
		if mv.ValueType != types.MeasuredValueTypePressure {
			continue
		}
		if _, err := stmt.ExecContext(ctx, &mv.ID, &mv.Value, &mv.FromDate, &mv.TillDate, &mv.SensorID, &mv.CreationDate, &mv.UpdateDate); err != nil {
			return fmt.Errorf("%v: Measured value id %v: %v", errorStatementExecute, mv.ID, err)
		}
	}
	return nil
}
// insertTemperature stores temperature measured values via the
// insertTemperature.sql asset. Values of any other type are skipped.
func (p *Postgres) insertTemperature(ctx context.Context, measuredValues []*types.MeasuredValue) error {
	queryBytes, err := Asset(fmt.Sprintf("%v/insertTemperature.sql", postgresAssetPath))
	if err != nil {
		return fmt.Errorf("%v: %v", errorGetAsset, err)
	}
	stmt, err := p.dbo.PrepareContext(ctx, string(queryBytes))
	if err != nil {
		return fmt.Errorf("%v: %v", errorPrepareStatement, err)
	}
	defer stmt.Close()
	for _, mv := range measuredValues {
		if mv.ValueType != types.MeasuredValueTypeTemperature {
			continue
		}
		if _, err := stmt.ExecContext(ctx, &mv.ID, &mv.Value, &mv.FromDate, &mv.TillDate, &mv.SensorID, &mv.CreationDate, &mv.UpdateDate); err != nil {
			return fmt.Errorf("%v: Measured value id %v: %v", errorStatementExecute, mv.ID, err)
		}
	}
	return nil
}
// InsertSensors stores every given sensor in the database.
func (p *Postgres) InsertSensors(ctx context.Context, sensors []*types.Sensor) error {
	queryBytes, err := Asset(fmt.Sprintf("%v/insertSensor.sql", postgresAssetPath))
	if err != nil {
		return fmt.Errorf("%v: %v", errorGetAsset, err)
	}
	stmt, err := p.dbo.PrepareContext(ctx, string(queryBytes))
	if err != nil {
		return fmt.Errorf("%v: %v", errorPrepareStatement, err)
	}
	defer stmt.Close()
	for _, s := range sensors {
		if _, err := stmt.ExecContext(ctx, &s.ID, &s.Name, &s.Location, &s.WireID, &s.I2CBus, &s.I2CAddress, &s.GPIONumber, &s.Model, &s.Enabled, &s.DeviceID, &s.CreationDate); err != nil {
			return fmt.Errorf("%v: %v", errorStatementExecute, err)
		}
	}
	return nil
}
// SelectDeviceByID returns the device with the given ID.
func (p *Postgres) SelectDeviceByID(ctx context.Context, id string) (*types.Device, error) {
	asset := fmt.Sprintf("%v/selectDeviceByID.sql", postgresAssetPath)
	queryBytes, err := Asset(asset)
	if err != nil {
		return nil, fmt.Errorf("%v: %v", errorGetAsset, err)
	}
	query := string(queryBytes)
	stmt, err := p.dbo.PrepareContext(ctx, query)
	if err != nil {
		return nil, fmt.Errorf("%v: %v", errorPrepareStatement, err)
	}
	// The original leaked the prepared statement; close it when done.
	defer stmt.Close()
	row := stmt.QueryRowContext(ctx, id)
	if row == nil {
		return nil, errorRowNotFound
	}
	device := new(types.Device)
	err = row.Scan(&device.ID, &device.Name, &device.Location, &device.CreationDate)
	if err != nil {
		return nil, fmt.Errorf("%v: %v", errorScanRow, err)
	}
	return device, nil
}
// SelectInfo returns the value stored under the given key in the info table.
func (p *Postgres) SelectInfo(ctx context.Context, key string) (string, error) {
	asset := fmt.Sprintf("%v/selectInfo.sql", postgresAssetPath)
	queryBytes, err := Asset(asset)
	if err != nil {
		return "", fmt.Errorf("%v: %v", errorGetAsset, err)
	}
	query := string(queryBytes)
	stmt, err := p.dbo.PrepareContext(ctx, query)
	if err != nil {
		return "", fmt.Errorf("%v: %v", errorPrepareStatement, err)
	}
	// The original leaked the prepared statement; close it when done.
	defer stmt.Close()
	row := stmt.QueryRowContext(ctx, key)
	if row == nil {
		return "", errorRowNotFound
	}
	value := ""
	if err := row.Scan(&value); err != nil {
		return "", fmt.Errorf("%v: %v", errorScanRow, err)
	}
	return value, nil
}
// SelectHumidities returns all stored humidity values.
func (p *Postgres) SelectHumidities(ctx context.Context) ([]*types.MeasuredValue, error) {
	queryFile := fmt.Sprintf("%v/selectHumidities.sql", postgresAssetPath)
	return p.selectMeasuredValues(ctx, types.MeasuredValueTypeHumidity, queryFile, nil)
}
// SelectHumidityByID returns the humidity value with the given ID.
func (p *Postgres) SelectHumidityByID(ctx context.Context, id string) (*types.MeasuredValue, error) {
	queryFile := fmt.Sprintf("%v/selectHumidityByID.sql", postgresAssetPath)
	measuredValues, err := p.selectMeasuredValues(ctx, types.MeasuredValueTypeHumidity, queryFile, []interface{}{id})
	switch {
	case err != nil:
		return nil, err
	case len(measuredValues) == 0:
		return nil, fmt.Errorf("%v: %v", errorRowNotFound, id)
	default:
		return measuredValues[0], nil
	}
}
// SelectMeasuredValues returns the measured values of every supported value
// type (humidity, pressure and temperature) in a single slice.
func (p *Postgres) SelectMeasuredValues(ctx context.Context) ([]*types.MeasuredValue, error) {
	measuredValues := make([]*types.MeasuredValue, 0)
	selectors := []func(ctx context.Context) ([]*types.MeasuredValue, error){
		p.SelectHumidities,
		p.SelectPressures,
		p.SelectTemperatures,
	}
	for _, selector := range selectors {
		values, err := selector(ctx)
		if err != nil {
			return nil, err
		}
		measuredValues = append(measuredValues, values...)
	}
	return measuredValues, nil
}
// SelectMeasuredValuesByIDAndType returns one measured value selected by its
// ID and value type; unknown types yield errorUnknownMeasuredValueType.
func (p *Postgres) SelectMeasuredValuesByIDAndType(ctx context.Context, id string, valueType types.MeasuredValueType) (*types.MeasuredValue, error) {
	selectors := map[types.MeasuredValueType]func(context.Context, string) (*types.MeasuredValue, error){
		types.MeasuredValueTypeHumidity:    p.SelectHumidityByID,
		types.MeasuredValueTypePressure:    p.SelectPressureByID,
		types.MeasuredValueTypeTemperature: p.SelectTemperatureByID,
	}
	selector, ok := selectors[valueType]
	if !ok {
		return nil, fmt.Errorf("%v: %v", errorUnknownMeasuredValueType, valueType)
	}
	return selector(ctx, id)
}
// SelectPressures returns all stored pressure values.
func (p *Postgres) SelectPressures(ctx context.Context) ([]*types.MeasuredValue, error) {
	queryFile := fmt.Sprintf("%v/selectPressures.sql", postgresAssetPath)
	return p.selectMeasuredValues(ctx, types.MeasuredValueTypePressure, queryFile, nil)
}
// SelectPressureByID returns the pressure value with the given ID.
func (p *Postgres) SelectPressureByID(ctx context.Context, id string) (*types.MeasuredValue, error) {
	queryFile := fmt.Sprintf("%v/selectPressureByID.sql", postgresAssetPath)
	measuredValues, err := p.selectMeasuredValues(ctx, types.MeasuredValueTypePressure, queryFile, []interface{}{id})
	switch {
	case err != nil:
		return nil, err
	case len(measuredValues) == 0:
		return nil, fmt.Errorf("%v: %v", errorRowNotFound, id)
	default:
		return measuredValues[0], nil
	}
}
// SelectSensorByID returns the sensor with the given ID.
func (p *Postgres) SelectSensorByID(ctx context.Context, id string) (*types.Sensor, error) {
	asset := fmt.Sprintf("%v/selectSensorByID.sql", postgresAssetPath)
	queryBytes, err := Asset(asset)
	if err != nil {
		return nil, fmt.Errorf("%v: %v", errorGetAsset, err)
	}
	query := string(queryBytes)
	stmt, err := p.dbo.PrepareContext(ctx, query)
	if err != nil {
		return nil, fmt.Errorf("%v: %v", errorPrepareStatement, err)
	}
	// The original leaked the prepared statement; close it when done.
	defer stmt.Close()
	row := stmt.QueryRowContext(ctx, id)
	if row == nil {
		return nil, errorRowNotFound
	}
	sensor := new(types.Sensor)
	err = row.Scan(&sensor.ID, &sensor.Name, &sensor.Location, &sensor.WireID, &sensor.I2CBus, &sensor.I2CAddress, &sensor.GPIONumber, &sensor.Model, &sensor.Enabled, &sensor.DeviceID, &sensor.CreationDate)
	if err != nil {
		return nil, fmt.Errorf("%v: %v", errorScanRow, err)
	}
	return sensor, nil
}
// SelectTemperatures returns all stored temperature values.
func (p *Postgres) SelectTemperatures(ctx context.Context) ([]*types.MeasuredValue, error) {
	queryFile := fmt.Sprintf("%v/selectTemperatures.sql", postgresAssetPath)
	return p.selectMeasuredValues(ctx, types.MeasuredValueTypeTemperature, queryFile, nil)
}
// SelectTemperatureByID returns the temperature value with the given ID.
func (p *Postgres) SelectTemperatureByID(ctx context.Context, id string) (*types.MeasuredValue, error) {
	queryFile := fmt.Sprintf("%v/selectTemperatureByID.sql", postgresAssetPath)
	measuredValues, err := p.selectMeasuredValues(ctx, types.MeasuredValueTypeTemperature, queryFile, []interface{}{id})
	switch {
	case err != nil:
		return nil, err
	case len(measuredValues) == 0:
		return nil, fmt.Errorf("%v: %v", errorRowNotFound, id)
	default:
		return measuredValues[0], nil
	}
}
// selectMeasuredValues loads measured values via the given query asset. The
// optional queryArgs are passed through to the statement and every returned
// value is tagged with measuredValueType.
func (p *Postgres) selectMeasuredValues(ctx context.Context, measuredValueType types.MeasuredValueType, queryFile string, queryArgs []interface{}) ([]*types.MeasuredValue, error) {
	queryBytes, err := Asset(queryFile)
	if err != nil {
		return nil, fmt.Errorf("%v: %v", errorGetAsset, err)
	}
	query := string(queryBytes)
	stmt, err := p.dbo.PrepareContext(ctx, query)
	if err != nil {
		return nil, fmt.Errorf("%v: %v", errorPrepareStatement, err)
	}
	// The original leaked the prepared statement and the result set.
	defer stmt.Close()
	rows, err := stmt.QueryContext(ctx, queryArgs...)
	if err != nil {
		return nil, fmt.Errorf("%v: %v", errorStatementQuery, err)
	}
	defer rows.Close()
	measuredValues := make([]*types.MeasuredValue, 0)
	for rows.Next() {
		measuredValue := new(types.MeasuredValue)
		measuredValue.ValueType = measuredValueType
		// The scan error was previously ignored, silently yielding
		// zero-valued rows.
		if err := rows.Scan(&measuredValue.ID, &measuredValue.Value, &measuredValue.FromDate, &measuredValue.TillDate, &measuredValue.SensorID, &measuredValue.CreationDate, &measuredValue.UpdateDate); err != nil {
			return nil, fmt.Errorf("%v: %v", errorScanRow, err)
		}
		measuredValues = append(measuredValues, measuredValue)
	}
	// Surface any error that terminated the iteration.
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("%v: %v", errorStatementQuery, err)
	}
	return measuredValues, nil
}
// UpdateDevices updates all specified devices into the database.
//
// NOTE(review): not yet implemented — currently a no-op that always
// returns nil.
func (p *Postgres) UpdateDevices(ctx context.Context, devices []*types.Device) error {
	return nil
}
// UpdateInfo overwrites the value stored under key in the info table and
// returns errorNoRowsAffected when the key did not exist.
func (p *Postgres) UpdateInfo(ctx context.Context, key string, value string) error {
	queryBytes, err := Asset(fmt.Sprintf("%v/updateInfo.sql", postgresAssetPath))
	if err != nil {
		return fmt.Errorf("%v: %v", errorGetAsset, err)
	}
	stmt, err := p.dbo.PrepareContext(ctx, string(queryBytes))
	if err != nil {
		return fmt.Errorf("%v: %v", errorPrepareStatement, err)
	}
	defer stmt.Close()
	res, err := stmt.ExecContext(ctx, key, value)
	if err != nil {
		return fmt.Errorf("%v: %v", errorStatementExecute, err)
	}
	affected, err := res.RowsAffected()
	switch {
	case err != nil:
		return err
	case affected == 0:
		return errorNoRowsAffected
	default:
		return nil
	}
}
// UpdateMeasuredValues updates the measured values which are stored in the database
//
// NOTE(review): not yet implemented — currently a no-op that always
// returns nil.
func (p *Postgres) UpdateMeasuredValues(ctx context.Context, measuredValues []*types.MeasuredValue) error {
	return nil
}
// UpdateSensors updates the sensors which are stored in the database.
//
// NOTE(review): not yet implemented — currently a no-op that always returns
// nil. The parameter was previously misspelled "sensots".
func (p *Postgres) UpdateSensors(ctx context.Context, sensors []*types.Sensor) error {
	return nil
}

View File

@ -1,397 +0,0 @@
package db_test
import (
"context"
"net/url"
"strings"
"testing"
"github.com/Masterminds/semver"
"github.com/stretchr/testify/require"
"github.com/volker-raschek/flucky/pkg/storage/db"
"github.com/volker-raschek/flucky/pkg/types"
"github.com/volker-raschek/flucky/test/goldenfiles"
)
// test couples a subtest name with its test function so the ordered list in
// TestPostgres can be executed via t.Run.
type test struct {
	Name string
	Test func(*testing.T)
}
var (
	// database is the shared connection under test; set in TestPostgres.
	database db.Database

	// postgresContainerImage names the container image used for the
	// integration database.
	postgresContainerImage string = "docker.io/postgres/postgres"
	// storageEndpointString points to the PostgreSQL instance under test.
	storageEndpointString string = "postgres://flucky:flucky@markus-pc.trier.cryptic.systems/postgres?sslmode=disable"

	// Paths to the golden JSON fixture files.
	goldenDevicesFilePath string = "test/goldenfiles/json/goldenDevices.json"
	goldenSensorsFilePath string = "test/goldenfiles/json/goldenSensors.json"
	goldenMeasuredValuesFilePath string = "test/goldenfiles/json/goldenMeasuredValuesUncompressedRounded.json"
	goldenPressuresFilePath string = "test/goldenfiles/json/goldenPressuresUncompressedRounded.json"
	goldenHumiditiesFilePath string = "test/goldenfiles/json/goldenHumiditiesUncompressedRounded.json"
	goldenTemperaturesFilePath string = "test/goldenfiles/json/goldenTemperaturesUncompressedRounded.json"

	// Fixtures decoded from the golden files by load().
	goldenDevices []*types.Device
	goldenSensors []*types.Sensor
	goldenMeasuredValues []*types.MeasuredValue
	goldenPressures []*types.MeasuredValue
	goldenHumidites []*types.MeasuredValue
	goldenTemperatures []*types.MeasuredValue
)
// load decodes all golden fixture files into the package-level variables.
func load(t *testing.T) {
	require := require.New(t)

	devices, err := goldenfiles.GetGoldenDevices(goldenDevicesFilePath)
	require.NoError(err)
	goldenDevices = devices

	sensors, err := goldenfiles.GetGoldenSensors(goldenSensorsFilePath)
	require.NoError(err)
	goldenSensors = sensors

	humidities, err := goldenfiles.GetGoldenMeasuredValues(goldenHumiditiesFilePath)
	require.NoError(err)
	goldenHumidites = humidities

	measuredValues, err := goldenfiles.GetGoldenMeasuredValues(goldenMeasuredValuesFilePath)
	require.NoError(err)
	goldenMeasuredValues = measuredValues

	pressures, err := goldenfiles.GetGoldenMeasuredValues(goldenPressuresFilePath)
	require.NoError(err)
	goldenPressures = pressures

	temperatures, err := goldenfiles.GetGoldenMeasuredValues(goldenTemperaturesFilePath)
	require.NoError(err)
	goldenTemperatures = temperatures
}
// TestPostgres runs the PostgreSQL integration tests in a fixed order, since
// later subtests depend on rows created by earlier ones.
func TestPostgres(t *testing.T) {
	require := require.New(t)

	load(t)

	storageEndpoint, err := url.Parse(storageEndpointString)
	require.Nil(err)

	// BUG fix: the original assigned the new connection to the package-level
	// variable before checking the error, and its local name shadowed the
	// imported db package.
	newDatabase, err := db.New(storageEndpoint)
	require.Nil(err)
	database = newDatabase

	tests := []*test{
		{Name: "schema", Test: testSchemaCreate},
		{Name: "insertInfo", Test: testInsertInfo},
		{Name: "insertDevices", Test: testInsertDevices},
		{Name: "insertSensors", Test: testInsertSensors},
		{Name: "insertHumidity", Test: testInsertHumidity},
		{Name: "insertPressure", Test: testInsertPressure},
		{Name: "insertTemperatures", Test: testInsertTemperatures},
		{Name: "selectHumidities", Test: testSelectHumidities},
		{Name: "selectPressures", Test: testSelectPressures},
		{Name: "selectTemperatures", Test: testSelectTemperatures},
		// {Name: "selectMeasuredValues", Test: testSelectMeasuredValues},
		{Name: "deleteHumidities", Test: testDeleteHumidity},
		{Name: "deleteInfo", Test: testDeleteInfo},
		{Name: "deletePressures", Test: testDeletePressures},
		{Name: "deleteTemperatures", Test: testDeleteTemperatures},
		{Name: "insertMeasuredValues", Test: testInsertMeasuredValues},
		{Name: "deleteMeasuredValues", Test: testDeleteMeasuredValues},
		{Name: "deleteSensors", Test: testDeleteSensors},
		{Name: "deleteDevices", Test: testDeleteDevices},
		{Name: "updateInfo", Test: testUpdateInfo},
	}
	for _, test := range tests {
		t.Run(test.Name, test.Test)
	}
}
// testSchemaCreate applies every shipped schema change file in order.
func testSchemaCreate(t *testing.T) {
	require := require.New(t)
	ctx := context.Background()

	sqlAssetFiles, err := db.AssetDir("pkg/storage/db/sql/psql/schema")
	require.NoError(err)

	for _, sqlAssetFile := range sqlAssetFiles {
		version, err := semver.NewVersion(strings.ReplaceAll(sqlAssetFile, ".sql", ""))
		require.NoError(err)
		require.NoError(database.Schema(ctx, version))
	}
}
// testInsertDevices inserts the golden devices and verifies that each one
// can be read back by its ID.
func testInsertDevices(t *testing.T) {
	require := require.New(t)
	ctx := context.Background()

	require.NoError(database.InsertDevices(ctx, goldenDevices))

	for _, goldenDevice := range goldenDevices {
		testDevice, err := database.SelectDeviceByID(ctx, goldenDevice.ID)
		require.NoError(err)
		goldenfiles.CompareMeasuredValues(t, goldenDevice, testDevice)
	}
}
// testInsertSensors inserts the golden sensors and verifies that each one
// can be read back by its ID.
func testInsertSensors(t *testing.T) {
	require := require.New(t)
	ctx := context.Background()

	require.NoError(database.InsertSensors(ctx, goldenSensors))

	for _, goldenSensor := range goldenSensors {
		testSensor, err := database.SelectSensorByID(ctx, goldenSensor.ID)
		require.NoError(err)
		goldenfiles.CompareMeasuredValues(t, goldenSensor, testSensor)
	}
}
// testInsertHumidity inserts the golden humidity values and compares every
// value read back against its golden counterpart.
func testInsertHumidity(t *testing.T) {
	require := require.New(t)
	ctx := context.Background()
	err := database.InsertMeasuredValues(ctx, goldenHumidites)
	require.NoError(err)
	for _, goldenHumidity := range goldenHumidites {
		testHumidity, err := database.SelectHumidityByID(ctx, goldenHumidity.ID)
		require.NoError(err)
		// BUG fix: the original compared testHumidity against itself, which
		// always passes. Compare golden against the queried value instead.
		goldenfiles.CompareMeasuredValues(t, []*types.MeasuredValue{goldenHumidity}, []*types.MeasuredValue{testHumidity})
	}
}
// testInsertInfo stores a key/value pair and reads it back.
func testInsertInfo(t *testing.T) {
	require := require.New(t)
	ctx := context.Background()

	require.NoError(database.InsertInfo(ctx, "test", "value"))

	value, err := database.SelectInfo(ctx, "test")
	require.NoError(err)
	require.Equal("value", value)
}
// testInsertMeasuredValues inserts the mixed golden measured values and
// verifies each one via a typed lookup.
func testInsertMeasuredValues(t *testing.T) {
	require := require.New(t)
	ctx := context.Background()

	require.NoError(database.InsertMeasuredValues(ctx, goldenMeasuredValues))

	for _, golden := range goldenMeasuredValues {
		queried, err := database.SelectMeasuredValuesByIDAndType(ctx, golden.ID, golden.ValueType)
		require.NoError(err)
		goldenfiles.CompareMeasuredValues(t, []*types.MeasuredValue{golden}, []*types.MeasuredValue{queried})
	}
}
// testInsertPressure inserts the golden pressure values and verifies that
// each one can be read back from the database by its ID.
//
// Fix: the read-back value was previously compared against itself
// (testPressure vs. testPressure), so the assertion could never fail.
// Compare the golden value against the stored one instead.
func testInsertPressure(t *testing.T) {
	require := require.New(t)
	ctx := context.Background()
	err := database.InsertMeasuredValues(ctx, goldenPressures)
	require.NoError(err)
	for _, goldenPressure := range goldenPressures {
		testPressure, err := database.SelectPressureByID(ctx, goldenPressure.ID)
		require.NoError(err)
		goldenfiles.CompareMeasuredValues(t, []*types.MeasuredValue{goldenPressure}, []*types.MeasuredValue{testPressure})
	}
}
// testInsertTemperatures inserts the golden temperature values and verifies
// that each one can be read back from the database by its ID.
func testInsertTemperatures(t *testing.T) {
	assert := require.New(t)
	ctx := context.Background()
	assert.NoError(database.InsertMeasuredValues(ctx, goldenTemperatures))
	for _, want := range goldenTemperatures {
		got, err := database.SelectTemperatureByID(ctx, want.ID)
		assert.NoError(err)
		goldenfiles.CompareMeasuredValues(t, []*types.MeasuredValue{want}, []*types.MeasuredValue{got})
	}
}
// testSelectHumidities selects all humidity values and compares them with
// the golden fixtures.
func testSelectHumidities(t *testing.T) {
	assert := require.New(t)
	humidities, err := database.SelectHumidities(context.Background())
	assert.NoError(err)
	goldenfiles.CompareMeasuredValues(t, goldenHumidites, humidities)
}
// testSelectMeasuredValues selects all measured values and compares them
// with the golden fixtures.
func testSelectMeasuredValues(t *testing.T) {
	assert := require.New(t)
	measuredValues, err := database.SelectMeasuredValues(context.Background())
	assert.NoError(err)
	goldenfiles.CompareMeasuredValues(t, goldenMeasuredValues, measuredValues)
}
// testSelectPressures selects all pressure values and compares them with
// the golden fixtures.
func testSelectPressures(t *testing.T) {
	assert := require.New(t)
	pressures, err := database.SelectPressures(context.Background())
	assert.NoError(err)
	goldenfiles.CompareMeasuredValues(t, goldenPressures, pressures)
}
// testSelectTemperatures selects all temperature values and compares them
// with the golden fixtures.
func testSelectTemperatures(t *testing.T) {
	assert := require.New(t)
	temperatures, err := database.SelectTemperatures(context.Background())
	assert.NoError(err)
	goldenfiles.CompareMeasuredValues(t, goldenTemperatures, temperatures)
}
// testDeleteDevices deletes the golden devices and verifies that selecting
// any of them afterwards fails.
func testDeleteDevices(t *testing.T) {
	assert := require.New(t)
	ctx := context.Background()
	assert.NoError(database.DeleteDevices(ctx, goldenDevices))
	for _, device := range goldenDevices {
		_, err := database.SelectDeviceByID(ctx, device.ID)
		assert.Error(err)
	}
}
// testDeleteSensors deletes the golden sensors and verifies that selecting
// any of them afterwards fails.
//
// Fix: the old code probed the deletion with SelectDeviceByID, i.e. it
// looked up a sensor ID in the devices table, which fails regardless of
// whether the sensor was actually deleted. Use SelectSensorByID (as the
// insert test does) instead.
func testDeleteSensors(t *testing.T) {
	require := require.New(t)
	ctx := context.Background()
	err := database.DeleteSensors(ctx, goldenSensors)
	require.NoError(err)
	for _, goldenSensor := range goldenSensors {
		_, err := database.SelectSensorByID(ctx, goldenSensor.ID)
		require.Error(err)
	}
}
// testDeleteHumidity deletes the golden humidity values and verifies that
// selecting any of them afterwards fails.
func testDeleteHumidity(t *testing.T) {
	assert := require.New(t)
	ctx := context.Background()
	assert.NoError(database.DeleteMeasuredValues(ctx, goldenHumidites))
	for _, humidity := range goldenHumidites {
		_, err := database.SelectHumidityByID(ctx, humidity.ID)
		assert.Error(err)
	}
}
// testDeleteInfo deletes the "test" info key and verifies that selecting it
// afterwards fails.
func testDeleteInfo(t *testing.T) {
	assert := require.New(t)
	ctx := context.Background()
	assert.NoError(database.DeleteInfo(ctx, "test"))
	_, err := database.SelectInfo(ctx, "test")
	assert.Error(err)
}
// testDeleteMeasuredValues deletes the mixed golden measured values and
// verifies that none of them can be selected afterwards.
//
// Fix: the old code probed every deletion through SelectPressureByID even
// though goldenMeasuredValues contains mixed value types; humidity and
// temperature IDs would never be found in the pressures table regardless of
// the deletion. Use the type-aware select (as testInsertMeasuredValues
// does) so each ID is checked in the correct table.
func testDeleteMeasuredValues(t *testing.T) {
	require := require.New(t)
	ctx := context.Background()
	err := database.DeleteMeasuredValues(ctx, goldenMeasuredValues)
	require.NoError(err)
	for _, goldenMeasuredValue := range goldenMeasuredValues {
		_, err := database.SelectMeasuredValuesByIDAndType(ctx, goldenMeasuredValue.ID, goldenMeasuredValue.ValueType)
		require.Error(err)
	}
}
// testDeletePressures deletes the golden pressure values and verifies that
// selecting any of them afterwards fails.
func testDeletePressures(t *testing.T) {
	assert := require.New(t)
	ctx := context.Background()
	assert.NoError(database.DeleteMeasuredValues(ctx, goldenPressures))
	for _, pressure := range goldenPressures {
		_, err := database.SelectPressureByID(ctx, pressure.ID)
		assert.Error(err)
	}
}
// testDeleteTemperatures deletes the golden temperature values and verifies
// that selecting any of them afterwards fails.
func testDeleteTemperatures(t *testing.T) {
	assert := require.New(t)
	ctx := context.Background()
	assert.NoError(database.DeleteMeasuredValues(ctx, goldenTemperatures))
	for _, temperature := range goldenTemperatures {
		_, err := database.SelectTemperatureByID(ctx, temperature.ID)
		assert.Error(err)
	}
}
// testUpdateInfo verifies that updating an existing info key changes its
// value and that updating a non-existent key fails.
func testUpdateInfo(t *testing.T) {
	assert := require.New(t)
	ctx := context.Background()
	// Valid case: insert, update, read back the new value, clean up.
	assert.NoError(database.InsertInfo(ctx, "key", "value"))
	assert.NoError(database.UpdateInfo(ctx, "key", "value2"))
	value, err := database.SelectInfo(ctx, "key")
	assert.NoError(err)
	assert.Equal("value2", value)
	assert.NoError(database.DeleteInfo(ctx, "key"))
	// Invalid case: updating a key that was never inserted must fail.
	assert.Error(database.UpdateInfo(ctx, "key2", "value"))
}

View File

@ -1,2 +0,0 @@
-- Delete a single device by its ID ($1). Dependent sensors and measured
-- values are removed via the ON DELETE CASCADE foreign keys.
DELETE FROM devices
WHERE device_id = $1;

View File

@ -1,2 +0,0 @@
-- Delete a single humidity measured value by its ID ($1).
DELETE FROM humidities
WHERE humidity_id = $1;

View File

@ -1,2 +0,0 @@
-- Delete a single key/value pair from the info table by its key ($1).
DELETE FROM info
WHERE key = $1;

View File

@ -1,2 +0,0 @@
-- Delete a single pressure measured value by its ID ($1).
DELETE FROM pressures
WHERE pressure_id = $1;

View File

@ -1,2 +0,0 @@
-- Delete a single sensor by its ID ($1). Dependent measured values are
-- removed via the ON DELETE CASCADE foreign keys.
DELETE FROM sensors
WHERE sensor_id = $1;

View File

@ -1,2 +0,0 @@
-- Delete a single temperature measured value by its ID ($1).
DELETE FROM temperatures
WHERE temperature_id = $1;

View File

@ -1,2 +0,0 @@
-- Insert a key ($1) / value ($2) pair into the info table. The key is the
-- primary key, so inserting an existing key fails.
INSERT INTO info (key, value)
VALUES ($1, $2);

View File

@ -1,152 +0,0 @@
-- Rebuild the flucky database schema from scratch. CASCADE also removes
-- the foreign keys and triggers depending on the dropped tables.
DROP TABLE IF EXISTS devices CASCADE;
DROP TABLE IF EXISTS sensors CASCADE;
DROP TABLE IF EXISTS humidities CASCADE;
DROP TABLE IF EXISTS pressures CASCADE;
DROP TABLE IF EXISTS temperatures CASCADE;
DROP TABLE IF EXISTS info CASCADE;
-- +----------------------------------------+
-- | TABLES |
-- +----------------------------------------+
-- A device is a host that owns sensors. IDs are CHAR(36), i.e. UUID-sized.
CREATE TABLE IF NOT EXISTS devices(
device_id CHAR(36) CONSTRAINT pk_devices PRIMARY KEY,
device_name VARCHAR(32) NOT NULL,
device_location VARCHAR(32),
device_last_contact TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP NOT NULL,
creation_date TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
);
-- A sensor belongs to a device and is addressed via exactly one of the
-- optional bus columns (1-wire ID, I2C bus/address, or GPIO number).
CREATE TABLE IF NOT EXISTS sensors (
sensor_id CHAR(36) CONSTRAINT pk_sensors PRIMARY KEY,
sensor_name VARCHAR(32) NOT NULL,
sensor_location VARCHAR(32) NOT NULL,
wire_id VARCHAR(15),
i2c_bus VARCHAR(255),
i2c_address VARCHAR(12),
gpio_number VARCHAR(6),
sensor_model VARCHAR(16) NOT NULL,
sensor_enabled BOOLEAN DEFAULT TRUE NOT NULL,
sensor_last_contact TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP NOT NULL,
device_id CHAR(36),
creation_date TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP NOT NULL
);
-- Measured values carry a validity period (from/till) per sensor.
CREATE TABLE IF NOT EXISTS humidities (
humidity_id CHAR(36) CONSTRAINT pk_humidities PRIMARY KEY,
humidity_value NUMERIC(9,3) NOT NULL,
humidity_from_date TIMESTAMP WITH TIME ZONE NOT NULL,
humidity_till_date TIMESTAMP WITH TIME ZONE,
sensor_id CHAR(36),
creation_date TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP NOT NULL,
update_date TIMESTAMP WITH TIME ZONE
);
CREATE TABLE IF NOT EXISTS pressures (
pressure_id CHAR(36) CONSTRAINT pk_pressures PRIMARY KEY,
pressure_value NUMERIC(10,3) NOT NULL,
pressure_from_date TIMESTAMP WITH TIME ZONE NOT NULL,
pressure_till_date TIMESTAMP WITH TIME ZONE,
sensor_id CHAR(36),
creation_date TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP NOT NULL,
update_date TIMESTAMP WITH TIME ZONE
);
CREATE TABLE IF NOT EXISTS temperatures (
temperature_id CHAR(36) CONSTRAINT pk_temperatures PRIMARY KEY,
temperature_value NUMERIC(5,3) NOT NULL,
temperature_from_date TIMESTAMP WITH TIME ZONE NOT NULL,
temperature_till_date TIMESTAMP WITH TIME ZONE,
sensor_id CHAR(36),
creation_date TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP NOT NULL,
update_date TIMESTAMP WITH TIME ZONE
);
-- Free-form key/value metadata about the database (e.g. schema version).
CREATE TABLE IF NOT EXISTS info (
key VARCHAR(32) CONSTRAINT pk_info PRIMARY KEY,
value VARCHAR(32) NOT NULL
);
-- +----------------------------------------+
-- | FOREIGN-KEYS |
-- +----------------------------------------+
-- Deleting a device cascades to its sensors; deleting a sensor cascades to
-- its measured values.
ALTER TABLE sensors
ADD FOREIGN KEY (device_id)
REFERENCES devices(device_id)
ON DELETE CASCADE
ON UPDATE CASCADE;
ALTER TABLE humidities
ADD FOREIGN KEY (sensor_id)
REFERENCES sensors(sensor_id)
ON DELETE CASCADE
ON UPDATE CASCADE;
ALTER TABLE pressures
ADD FOREIGN KEY (sensor_id)
REFERENCES sensors(sensor_id)
ON DELETE CASCADE
ON UPDATE CASCADE;
ALTER TABLE temperatures
ADD FOREIGN KEY (sensor_id)
REFERENCES sensors(sensor_id)
ON DELETE CASCADE
ON UPDATE CASCADE;
-- +----------------------------------------+
-- | Trigger-Functions |
-- +----------------------------------------+
-- Bump devices.device_last_contact for the device of the updated sensor.
CREATE OR REPLACE FUNCTION device_last_contact()
RETURNS trigger AS
$BODY$
BEGIN
UPDATE devices
SET device_last_contact = CURRENT_TIMESTAMP
WHERE device_id = NEW.device_id;
RETURN NEW;
END;
$BODY$ LANGUAGE plpgsql;
-- Bump sensors.sensor_last_contact (and re-enable the sensor) for the
-- sensor of a newly inserted measured value.
CREATE OR REPLACE FUNCTION sensor_last_contact()
RETURNS trigger AS
$BODY$
BEGIN
UPDATE sensors
SET sensor_last_contact = CURRENT_TIMESTAMP,
sensor_enabled = true
WHERE sensor_id = NEW.sensor_id;
RETURN NEW;
END;
$BODY$ LANGUAGE plpgsql;
-- +----------------------------------------+
-- | Trigger |
-- +----------------------------------------+
-- Drop the triggers before recreating them so the script can be re-run.
-- Fix: the old script dropped "ai_pressure" although the trigger created
-- below is named "ai_pressures", and it never dropped "au_sensors" at all;
-- re-running the trigger section therefore failed on existing tables.
DROP TRIGGER IF EXISTS au_sensors ON sensors;
DROP TRIGGER IF EXISTS ai_humidities ON humidities;
DROP TRIGGER IF EXISTS ai_pressures ON pressures;
DROP TRIGGER IF EXISTS ai_temperatures ON temperatures;
-- Propagate sensor updates to the owning device's last-contact timestamp.
CREATE TRIGGER au_sensors
AFTER UPDATE
ON sensors
FOR EACH ROW
EXECUTE PROCEDURE device_last_contact();
-- Every inserted measured value refreshes its sensor's last-contact stamp.
CREATE TRIGGER ai_humidities
AFTER INSERT
ON humidities
FOR EACH ROW
EXECUTE PROCEDURE sensor_last_contact();
CREATE TRIGGER ai_pressures
AFTER INSERT
ON pressures
FOR EACH ROW
EXECUTE PROCEDURE sensor_last_contact();
CREATE TRIGGER ai_temperatures
AFTER INSERT
ON temperatures
FOR EACH ROW
EXECUTE PROCEDURE sensor_last_contact();

View File

@ -1,3 +0,0 @@
-- Migration: allow NULL creation dates on the measured-value tables so that
-- values without a known creation date can be stored.
ALTER TABLE humidities ALTER COLUMN creation_date DROP NOT NULL;
ALTER TABLE pressures ALTER COLUMN creation_date DROP NOT NULL;
ALTER TABLE temperatures ALTER COLUMN creation_date DROP NOT NULL;

View File

@ -1,10 +0,0 @@
-- Select all humidity measured values.
SELECT
humidity_id,
humidity_value,
humidity_from_date,
humidity_till_date,
sensor_id,
creation_date,
update_date
FROM
humidities;

View File

@ -1,12 +0,0 @@
-- Select a single humidity measured value by its ID ($1).
SELECT
humidity_id,
humidity_value,
humidity_from_date,
humidity_till_date,
sensor_id,
creation_date,
update_date
FROM
humidities
WHERE
humidity_id = $1;

View File

@ -1,3 +0,0 @@
-- Select the value stored for the given info key ($1).
SELECT value
FROM info
WHERE key = $1;

View File

@ -1,12 +0,0 @@
-- Select a single pressure measured value by its ID ($1).
SELECT
pressure_id,
pressure_value,
pressure_from_date,
pressure_till_date,
sensor_id,
creation_date,
update_date
FROM
pressures
WHERE
pressure_id = $1;

View File

@ -1,10 +0,0 @@
-- Select all pressure measured values.
SELECT
pressure_id,
pressure_value,
pressure_from_date,
pressure_till_date,
sensor_id,
creation_date,
update_date
FROM
pressures;

View File

@ -1,12 +0,0 @@
-- Select a single temperature measured value by its ID ($1).
SELECT
temperature_id,
temperature_value,
temperature_from_date,
temperature_till_date,
sensor_id,
creation_date,
update_date
FROM
temperatures
WHERE
temperature_id = $1;

View File

@ -1,10 +0,0 @@
-- Select all temperature measured values.
SELECT
temperature_id,
temperature_value,
temperature_from_date,
temperature_till_date,
sensor_id,
creation_date,
update_date
FROM
temperatures;

View File

@ -1,3 +0,0 @@
-- Update the value ($2) stored for an existing info key ($1).
UPDATE info
SET value = $2
WHERE key = $1;

View File

@ -1,142 +0,0 @@
package logfile
import (
"encoding/csv"
"fmt"
"os"
"path/filepath"
"strconv"
"time"
"github.com/volker-raschek/flucky/pkg/internal/format"
"github.com/volker-raschek/flucky/pkg/types"
)
// csvLogfile stores measured values as CSV records in a single file.
type csvLogfile struct {
	// logfile is the absolute or relative path of the CSV file.
	logfile string
}
// Read parses the CSV logfile and returns its measured values. If the
// logfile (or its parent directory) does not exist yet, it is created empty
// first so a fresh installation yields an empty result instead of an error.
//
// Expected record layout (mirrors Write):
//
//	0 id, 1 value type, 2 value, 3 from date, 4 till date, 5 sensor id,
//	6 creation date ("null" if unknown), 7 update date ("null" if unknown)
func (cl *csvLogfile) Read() ([]*types.MeasuredValue, error) {
	if _, err := os.Stat(cl.logfile); os.IsNotExist(err) {
		if _, err := os.Stat(filepath.Dir(cl.logfile)); os.IsNotExist(err) {
			if err := os.MkdirAll(filepath.Dir(cl.logfile), 0755); err != nil {
				return nil, fmt.Errorf("%v: %v", errorDirectoryCreate, filepath.Dir(cl.logfile))
			}
		}
		f, err := os.Create(cl.logfile)
		if err != nil {
			return nil, fmt.Errorf("%v: %v", errorLogfileCreate, cl.logfile)
		}
		f.Close()
	}
	f, err := os.Open(cl.logfile)
	if err != nil {
		return nil, fmt.Errorf("%v: %v", errorLogfileOpen, cl.logfile)
	}
	defer f.Close()
	r := csv.NewReader(f)
	records, err := r.ReadAll()
	if err != nil {
		return nil, fmt.Errorf("%v %v: %v", errorLogfileDecode, cl.logfile, err)
	}
	measuredValues := make([]*types.MeasuredValue, 0)
	for _, record := range records {
		// ValueType
		// NOTE(review): on failure this reports errorParseFloat although a
		// value type, not a float, was being parsed — looks like a
		// copy-paste slip; confirm before changing the message.
		valueType, err := types.SelectMeasuredValueType(record[1])
		if err != nil {
			return nil, fmt.Errorf("%v %v: %v", errorParseFloat, record[1], err)
		}
		// Value
		value, err := strconv.ParseFloat(record[2], 64)
		if err != nil {
			return nil, fmt.Errorf("%v %v: %v", errorParseFloat, record[2], err)
		}
		// Times: column 3 is the from date, column 4 the till date.
		times := make([]time.Time, 0)
		for _, i := range []int{3, 4} {
			time, err := time.Parse(format.TimeFormat, record[i])
			if err != nil {
				return nil, fmt.Errorf("%v %v: %v", errorParseTime, record[i], err)
			}
			times = append(times, time)
		}
		measuredValue := &types.MeasuredValue{
			ID:        record[0],
			ValueType: *valueType,
			Value:     value,
			FromDate:  times[0],
			TillDate:  times[1],
			SensorID:  record[5],
		}
		// Creation date: the literal "null" leaves the field nil.
		if record[6] != "null" {
			creationDate, err := time.Parse(format.TimeFormat, record[6])
			if err != nil {
				return nil, fmt.Errorf("%v %v: %v", errorParseTime, record[6], err)
			}
			measuredValue.CreationDate = &creationDate
		}
		// Update date: the literal "null" leaves the field nil.
		if record[7] != "null" {
			updateDate, err := time.Parse(format.TimeFormat, record[7])
			if err != nil {
				return nil, fmt.Errorf("%v %v: %v", errorParseTime, record[7], err)
			}
			measuredValue.UpdateDate = &updateDate
		}
		measuredValues = append(measuredValues, measuredValue)
	}
	return measuredValues, nil
}
// Write serializes the given measured values as CSV records and overwrites
// the logfile. The record layout matches Read: id, value type, value, from
// date, till date, sensor id, creation date, update date; a nil creation or
// update date is written as the literal "null".
func (cl *csvLogfile) Write(measuredValues []*types.MeasuredValue) error {
	f, err := os.Create(cl.logfile)
	if err != nil {
		return fmt.Errorf("%v: %v", errorLogfileCreate, cl.logfile)
	}
	defer f.Close()
	w := csv.NewWriter(f)
	for _, measuredValue := range measuredValues {
		record := []string{
			measuredValue.ID,
			fmt.Sprintf("%v", measuredValue.ValueType),
			fmt.Sprintf("%v", measuredValue.Value),
			measuredValue.FromDate.Format(format.TimeFormat),
			measuredValue.TillDate.Format(format.TimeFormat),
			measuredValue.SensorID,
		}
		// Fix: the old implementation dereferenced CreationDate
		// unconditionally and panicked on nil, although Read explicitly
		// models an unknown creation date as nil / "null".
		if measuredValue.CreationDate != nil {
			record = append(record, measuredValue.CreationDate.Format(format.TimeFormat))
		} else {
			record = append(record, "null")
		}
		if measuredValue.UpdateDate != nil {
			record = append(record, measuredValue.UpdateDate.Format(format.TimeFormat))
		} else {
			record = append(record, "null")
		}
		// Fix: write errors were silently discarded before.
		if err := w.Write(record); err != nil {
			return fmt.Errorf("%v: %v", errorLogfileWrite, err)
		}
	}
	w.Flush()
	// Flush does not return an error; Error reports any deferred one.
	if err := w.Error(); err != nil {
		return fmt.Errorf("%v: %v", errorLogfileWrite, err)
	}
	return nil
}

View File

@ -1,30 +0,0 @@
package logfile
import "errors"
// Sentinel errors of the logfile package. Call sites wrap them with
// fmt.Errorf to add the affected file or record as context.
// NOTE(review): several of these appear unused within this file — verify
// against the rest of the package before removing any.
var (
	errorDirectoryCreate = errors.New("Can not create directory")
	errorLogfileCreate = errors.New("Can not create logfile")
	errorLogfileDecode = errors.New("Can not decode from reader")
	errorLogfileEncode = errors.New("Can not encode from writer")
	errorLogfileMarshal = errors.New("Can not marshal values")
	errorLogfileNotFound = errors.New("Can not find logfile")
	errorLogfileOpen = errors.New("Can not open logfile")
	errorLogfileRead = errors.New("Can not read from given reader")
	errorLogfileUnmarshal = errors.New("Can not unmarshal values")
	errorLogfileWrite = errors.New("Can not write with given writer")
	errorNoValidHumidityID = errors.New("No valid humidity id detected or available")
	errorNoValidMesuredValue = errors.New("No mesured value detected or available")
	errorNoValidSensorID = errors.New("No sensor id detected or available")
	errorNoValidTemperatureID = errors.New("No valid temperature id detected or available")
	errorNoValidTime = errors.New("No time detected or available")
	errorNoValidTimePeriods = errors.New("No valid time periods")
	errorParseFloat = errors.New("Can not parse float")
	errorParseMeasurementUnit = errors.New("Can not parse mesaurement unit")
	errorParseTime = errors.New("Can not parse time")
	errorTypeSwitch = errors.New("Can not detect type via type switch")
)

View File

@ -1,11 +0,0 @@
package logfile
import (
"github.com/volker-raschek/flucky/pkg/types"
)
// Logfile is an interface for various logfiles. Implementations persist
// measured values to disk in a format-specific way (CSV, JSON, XML).
type Logfile interface {
	// Read returns all measured values currently stored in the logfile.
	Read() ([]*types.MeasuredValue, error)
	// Write replaces the logfile content with the given measured values.
	Write(measuredValues []*types.MeasuredValue) error
}

View File

@ -1,69 +0,0 @@
package logfile
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"github.com/volker-raschek/flucky/pkg/types"
)
// jsonLogfile stores measured values as an indented JSON array in a file.
type jsonLogfile struct {
	// logfile is the absolute or relative path of the JSON file.
	logfile string
}
// Read decodes the JSON logfile and returns its measured values. A missing
// logfile (or parent directory) is created first, seeded with an empty JSON
// array, so a fresh installation yields an empty result instead of an error.
func (jl *jsonLogfile) Read() ([]*types.MeasuredValue, error) {
	if _, err := os.Stat(jl.logfile); os.IsNotExist(err) {
		if _, err := os.Stat(filepath.Dir(jl.logfile)); os.IsNotExist(err) {
			if err := os.MkdirAll(filepath.Dir(jl.logfile), 0755); err != nil {
				return nil, fmt.Errorf("%v: %v", errorDirectoryCreate, filepath.Dir(jl.logfile))
			}
		}
		f, err := os.Create(jl.logfile)
		if err != nil {
			return nil, fmt.Errorf("%v: %v", errorLogfileCreate, jl.logfile)
		}
		// Fix: the file was previously seeded with "{}" (a JSON object),
		// which cannot be decoded into the []*types.MeasuredValue slice
		// below, so reading a freshly created logfile always failed.
		f.WriteString("[]")
		f.Close()
	}
	f, err := os.Open(jl.logfile)
	if err != nil {
		return nil, fmt.Errorf("%v %v: %v", errorLogfileOpen, jl.logfile, err)
	}
	// Fix: the old implementation never closed the opened file handle.
	defer f.Close()
	measuredValues := make([]*types.MeasuredValue, 0)
	if err := json.NewDecoder(f).Decode(&measuredValues); err != nil {
		return nil, fmt.Errorf("%v %v: %v", errorLogfileDecode, jl.logfile, err)
	}
	return measuredValues, nil
}
// Write overwrites the logfile with the given measured values encoded as
// indented JSON. The parent directory is created on demand.
func (jl *jsonLogfile) Write(measuredValues []*types.MeasuredValue) error {
	if _, err := os.Stat(filepath.Dir(jl.logfile)); os.IsNotExist(err) {
		// Fix: the permission bits were the decimal literal 755 (octal
		// 1363) instead of the intended octal 0755.
		if err := os.MkdirAll(filepath.Dir(jl.logfile), 0755); err != nil {
			return fmt.Errorf("Directory for the logfile can not be created: %v", err)
		}
	}
	f, err := os.Create(jl.logfile)
	if err != nil {
		return fmt.Errorf("%v %v: %v", errorLogfileCreate, jl.logfile, err)
	}
	// Fix: the created file was never closed before.
	defer f.Close()
	jsonEncoder := json.NewEncoder(f)
	jsonEncoder.SetIndent("", " ")
	err = jsonEncoder.Encode(measuredValues)
	if err != nil {
		return fmt.Errorf("%v %v: %v", errorLogfileEncode, jl.logfile, err)
	}
	return nil
}

View File

@ -1,30 +0,0 @@
package logfile
import (
"path/filepath"
)
// New returns a Logfile implementation chosen by the file extension of the
// given path: ".csv" selects CSV, ".xml" selects XML, and every other
// extension — including ".json" — falls back to the JSON format.
func New(logfile string) Logfile {
	switch filepath.Ext(logfile) {
	case ".csv":
		return &csvLogfile{logfile: logfile}
	case ".xml":
		return &xmlLogfile{logfile: logfile}
	default:
		// ".json" and unknown extensions share the JSON implementation.
		return &jsonLogfile{logfile: logfile}
	}
}

View File

@ -1,19 +0,0 @@
package logfile
import (
"encoding/xml"
"github.com/volker-raschek/flucky/pkg/types"
)
// MeasuredValues is an XML Wrapper for an array of measured values. It
// provides the <measured_values> root element of an XML logfile.
type MeasuredValues struct {
	XMLName xml.Name `xml:"measured_values"`
	MeasuredValues []*MeasuredValue `xml:"measured_value"`
}
// MeasuredValue is an XML Wrapper for the original measured value struct,
// serialized as a <measured_value> element.
type MeasuredValue struct {
	XMLName xml.Name `xml:"measured_value"`
	*types.MeasuredValue
}

View File

@ -1,84 +0,0 @@
package logfile
import (
"encoding/xml"
"fmt"
"os"
"path/filepath"
"github.com/volker-raschek/flucky/pkg/types"
)
// xmlLogfile stores measured values as an XML document in a single file.
type xmlLogfile struct {
	// logfile is the absolute or relative path of the XML file.
	logfile string
}
// GetLogfile returns the path of the underlying XML logfile.
func (xl *xmlLogfile) GetLogfile() string {
	return xl.logfile
}
// Read decodes the XML logfile and returns the measured values contained in
// it. A missing logfile (or parent directory) is created empty first.
func (xl *xmlLogfile) Read() ([]*types.MeasuredValue, error) {
	if _, err := os.Stat(xl.logfile); os.IsNotExist(err) {
		if _, err := os.Stat(filepath.Dir(xl.logfile)); os.IsNotExist(err) {
			if err := os.MkdirAll(filepath.Dir(xl.logfile), 0755); err != nil {
				return nil, fmt.Errorf("%v: %v", errorDirectoryCreate, filepath.Dir(xl.logfile))
			}
		}
		f, err := os.Create(xl.logfile)
		if err != nil {
			return nil, fmt.Errorf("%v: %v", errorLogfileCreate, xl.logfile)
		}
		f.Close()
	}
	f, err := os.Open(xl.logfile)
	if err != nil {
		return nil, fmt.Errorf("%v: %v", errorLogfileOpen, xl.logfile)
	}
	defer f.Close()
	// Decode into the XML envelope type, then unwrap each element into the
	// plain types.MeasuredValue slice the interface promises.
	measuredValues := new(MeasuredValues)
	if err := xml.NewDecoder(f).Decode(&measuredValues); err != nil {
		return nil, fmt.Errorf("%v: %v", errorLogfileDecode, err)
	}
	cachedMeasuredValues := make([]*types.MeasuredValue, 0)
	for _, measuredValue := range measuredValues.MeasuredValues {
		cachedMeasuredValues = append(cachedMeasuredValues, measuredValue.MeasuredValue)
	}
	return cachedMeasuredValues, nil
}
// Write overwrites the logfile with the given measured values, wrapped in
// the XML envelope types and marshaled with indentation.
func (xl *xmlLogfile) Write(measuredValues []*types.MeasuredValue) error {
	f, err := os.Create(xl.logfile)
	if err != nil {
		return fmt.Errorf("%v: %v", errorLogfileCreate, xl.logfile)
	}
	defer f.Close()
	// Wrap each value into the XML element type before marshaling.
	cachedMeasuredValues := new(MeasuredValues)
	for _, measuredValue := range measuredValues {
		cachedMeasuredValue := &MeasuredValue{
			MeasuredValue: measuredValue,
		}
		cachedMeasuredValues.MeasuredValues = append(cachedMeasuredValues.MeasuredValues, cachedMeasuredValue)
	}
	bytes, err := xml.MarshalIndent(cachedMeasuredValues, "", " ")
	if err != nil {
		return fmt.Errorf("%v: %v", errorLogfileMarshal, err)
	}
	_, err = f.Write(bytes)
	if err != nil {
		return fmt.Errorf("%v: %v", errorLogfileWrite, err)
	}
	return nil
}

View File

@ -11,4 +11,4 @@ INSERT INTO sensors (
device_id,
creation_date
)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11);
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11);

View File

@ -2,132 +2,239 @@ package storage
import (
"context"
"database/sql"
"fmt"
"math"
"net/url"
"sort"
"path/filepath"
"github.com/volker-raschek/flucky/pkg/internal/format"
"github.com/volker-raschek/flucky/pkg/storage/db"
"github.com/volker-raschek/flucky/pkg/storage/logfile"
_ "github.com/lib/pq"
"github.com/volker-raschek/flucky/pkg/types"
"github.com/volker-raschek/go-logger/pkg/logger"
)
// Compression the measured values. The system checks whether the measured values
// of the same type correspond to those of the predecessor. If this is the case,
// the current value is discarded and the validity date of the previous value is
// set to that of the current value. This means that no information is lost.
// Only the validity period of the measured value is increased.
func Compression(measuredValues []*types.MeasuredValue) []*types.MeasuredValue {
compressedMeasuredValues := make([]*types.MeasuredValue, 0)
lastMeasuredValuesBySensors := make(map[string]map[types.MeasuredValueType]*types.MeasuredValue, 0)
// Sort all measured values according to the start time of the validity date
// in order to successfully implement the subsequent compression.
sort.SliceStable(measuredValues, func(i int, j int) bool {
return measuredValues[i].FromDate.Before(measuredValues[j].TillDate)
})
now := format.FormatedTime()
for _, measuredValue := range measuredValues {
// If the sensor id does not exist in the map, a new map is initialized,
// which can assume measured value types as the key. Behind this key there
// is a pointer which refers to a measured value in the memory. This new map
// is added to the map "lastMeasuredValuesBySensors" under the sensor ID.
// This makes it possible to store one measured value per measured value
// type per sensor.
if _, ok := lastMeasuredValuesBySensors[measuredValue.SensorID]; !ok {
lastMeasuredValuesBySensors[measuredValue.SensorID] = make(map[types.MeasuredValueType]*types.MeasuredValue, 0)
}
if _, ok := lastMeasuredValuesBySensors[measuredValue.SensorID][measuredValue.ValueType]; !ok {
lastMeasuredValuesBySensors[measuredValue.SensorID][measuredValue.ValueType] = measuredValue
continue
}
if lastMeasuredValuesBySensors[measuredValue.SensorID][measuredValue.ValueType].Value == measuredValue.Value {
lastMeasuredValuesBySensors[measuredValue.SensorID][measuredValue.ValueType].TillDate = measuredValue.TillDate
lastMeasuredValuesBySensors[measuredValue.SensorID][measuredValue.ValueType].UpdateDate = &now
} else if lastMeasuredValuesBySensors[measuredValue.SensorID][measuredValue.ValueType].Value != measuredValue.Value {
compressedMeasuredValues = append(compressedMeasuredValues, lastMeasuredValuesBySensors[measuredValue.SensorID][measuredValue.ValueType])
delete(lastMeasuredValuesBySensors[measuredValue.SensorID], measuredValue.ValueType)
lastMeasuredValuesBySensors[measuredValue.SensorID][measuredValue.ValueType] = measuredValue
}
}
// Copy all remaining entries from the map into the cache array
for _, lastMeasuredValuesBySensor := range lastMeasuredValuesBySensors {
for _, measuredValueType := range types.MeasuredValueTypes {
if measuredValue, ok := lastMeasuredValuesBySensor[measuredValueType]; ok {
compressedMeasuredValues = append(compressedMeasuredValues, measuredValue)
}
}
}
// Sort all measured values again to include the measured values from the
// cache.
sort.SliceStable(compressedMeasuredValues, func(i int, j int) bool {
return compressedMeasuredValues[i].FromDate.Before(compressedMeasuredValues[j].FromDate)
})
return compressedMeasuredValues
// Storage is a general interface for a storage endpoint
type Storage interface {
InsertDevice(ctx context.Context, device *types.Device) error
InsertMeasuredValues(ctx context.Context, measuredValues []*types.MeasuredValue) error
InsertSensor(ctx context.Context, sensor *types.Sensor) error
SelectDevice(ctx context.Context, id string) (*types.Device, error)
SelectSensor(ctx context.Context, id string) (*types.Sensor, error)
}
// Read measured values from the given storage endpoint url. The scheme must be
// matched to a provider, if the scheme is not implemented, the function
// returns an error
func Read(ctx context.Context, storageEndpoint *url.URL) ([]*types.MeasuredValue, error) {
switch storageEndpoint.Scheme {
case "file":
measuredValueLogfile := logfile.New(storageEndpoint.Path)
return measuredValueLogfile.Read()
var (
postgresAssetPath = "pkg/storage/postgres"
)
// Postgres implementation
type Postgres struct {
dbo *sql.DB
flogger logger.Logger
}
// InsertDevice into the database
func (postgres *Postgres) InsertDevice(ctx context.Context, device *types.Device) error {
asset := filepath.Join(postgresAssetPath, "insertDevice.sql")
queryBytes, err := Asset(asset)
if err != nil {
return fmt.Errorf("Failed to load asset %v: %v", asset, err)
}
query := string(queryBytes)
tx, err := postgres.dbo.BeginTx(ctx, &sql.TxOptions{ReadOnly: false})
if err != nil {
return fmt.Errorf("Failed to begin new transaction: %v", err)
}
stmt, err := tx.Prepare(query)
if err != nil {
return fmt.Errorf("Failed to prepare statement: %v", err)
}
defer stmt.Close()
_, err = stmt.Exec(&device.ID, &device.Name, &device.Location, &device.CreationDate)
if err != nil {
tx.Rollback()
return fmt.Errorf("Failed to execute statement: %v", err)
}
return tx.Commit()
}
// InsertMeasuredValues into the database
func (postgres *Postgres) InsertMeasuredValues(ctx context.Context, measuredValues []*types.MeasuredValue) error {
splittedMeasuredValues := make(map[string][]*types.MeasuredValue, 0)
for _, measuredValue := range measuredValues {
if _, ok := splittedMeasuredValues[measuredValue.ValueType]; !ok {
splittedMeasuredValues[measuredValue.ValueType] = make([]*types.MeasuredValue, 0)
}
splittedMeasuredValues[measuredValue.ValueType] = append(splittedMeasuredValues[measuredValue.ValueType], measuredValue)
}
tx, err := postgres.dbo.BeginTx(ctx, &sql.TxOptions{ReadOnly: false})
if err != nil {
return fmt.Errorf("Failed to begin new transaction: %v", err)
}
// General insert function
insert := func(tx *sql.Tx, asset string, measuredValues []*types.MeasuredValue) error {
queryBytes, err := Asset(asset)
if err != nil {
return fmt.Errorf("Failed to load asset %v: %v", asset, err)
}
query := string(queryBytes)
stmt, err := tx.Prepare(query)
if err != nil {
return fmt.Errorf("Failed to prepare statement: %v", err)
}
defer stmt.Close()
for _, measuredValue := range measuredValues {
_, err := stmt.Exec(&measuredValue.ID, &measuredValue.Value, &measuredValue.FromDate, &measuredValue.TillDate, &measuredValue.SensorID, &measuredValue.CreationDate, &measuredValue.UpdateDate)
if err != nil {
return fmt.Errorf("Failed to execute statement: %v", err)
}
}
return nil
}
for measuredValueType, measuredValues := range splittedMeasuredValues {
var asset string
switch measuredValueType {
case "humidity":
asset = filepath.Join(postgresAssetPath, "insertHumidity.sql")
case "pressure":
asset = filepath.Join(postgresAssetPath, "insertPressure.sql")
case "temperature":
asset = filepath.Join(postgresAssetPath, "insertTemperature.sql")
default:
tx.Rollback()
return fmt.Errorf("Measured value type %v not supported", measuredValueType)
}
err := insert(tx, asset, measuredValues)
if err != nil {
tx.Rollback()
return err
}
}
return tx.Commit()
}
// InsertSensor into the database
func (postgres *Postgres) InsertSensor(ctx context.Context, sensor *types.Sensor) error {
asset := filepath.Join(postgresAssetPath, "insertSensor.sql")
queryBytes, err := Asset(asset)
if err != nil {
return fmt.Errorf("Failed to load asset %v: %v", asset, err)
}
query := string(queryBytes)
tx, err := postgres.dbo.BeginTx(ctx, &sql.TxOptions{ReadOnly: false})
if err != nil {
return fmt.Errorf("Failed to begin new transaction: %v", err)
}
stmt, err := tx.Prepare(query)
if err != nil {
return fmt.Errorf("Failed to prepare statement: %v", err)
}
defer stmt.Close()
_, err = stmt.Exec(&sensor.ID, &sensor.Name, &sensor.Location, &sensor.WireID, &sensor.I2CBus, &sensor.I2CAddress, &sensor.GPIONumber, &sensor.Model, &sensor.Enabled, &sensor.DeviceID, &sensor.CreationDate)
if err != nil {
tx.Rollback()
return fmt.Errorf("Failed to execute statement: %v", err)
}
return tx.Commit()
}
// SelectDevice from database
func (postgres *Postgres) SelectDevice(ctx context.Context, id string) (*types.Device, error) {
asset := filepath.Join(postgresAssetPath, "selectDeviceByID.sql")
queryBytes, err := Asset(asset)
if err != nil {
return nil, fmt.Errorf("Failed to load asset %v: %v", asset, err)
}
query := string(queryBytes)
tx, err := postgres.dbo.BeginTx(ctx, &sql.TxOptions{ReadOnly: true})
if err != nil {
return nil, fmt.Errorf("Failed to begin new transaction: %v", err)
}
stmt, err := tx.Prepare(query)
if err != nil {
return nil, fmt.Errorf("Failed to prepare statement: %v", err)
}
defer stmt.Close()
row := stmt.QueryRow(id)
device := new(types.Device)
err = row.Scan(&device.ID, &device.Name, &device.Location, &device.CreationDate)
if err != nil {
return nil, fmt.Errorf("Failed to scan row: %v", err)
}
return device, nil
}
// SelectSensor from database
func (postgres *Postgres) SelectSensor(ctx context.Context, id string) (*types.Sensor, error) {
asset := filepath.Join(postgresAssetPath, "selectSensorByID.sql")
queryBytes, err := Asset(asset)
if err != nil {
return nil, fmt.Errorf("Failed to load asset %v: %v", asset, err)
}
query := string(queryBytes)
tx, err := postgres.dbo.BeginTx(ctx, &sql.TxOptions{ReadOnly: true})
if err != nil {
return nil, fmt.Errorf("Failed to begin new transaction: %v", err)
}
stmt, err := tx.Prepare(query)
if err != nil {
return nil, fmt.Errorf("Failed to prepare statement: %v", err)
}
defer stmt.Close()
row := stmt.QueryRow(id)
sensor := new(types.Sensor)
err = row.Scan(&sensor.ID, &sensor.Name, &sensor.Location, &sensor.WireID, &sensor.I2CBus, &sensor.I2CAddress, &sensor.GPIONumber, &sensor.Model, &sensor.Enabled, &sensor.DeviceID, &sensor.CreationDate)
if err != nil {
return nil, fmt.Errorf("Failed to scan row: %v", err)
}
return sensor, nil
}
// New returns a new storage provider
func New(storageEndpoint string, flogger logger.Logger) (Storage, error) {
storageEndpointURL, err := url.Parse(storageEndpoint)
if err != nil {
return nil, err
}
switch storageEndpointURL.Scheme {
case "postgres":
database, err := db.New(storageEndpoint)
newDBO, err := sql.Open(storageEndpointURL.Scheme, storageEndpointURL.String())
if err != nil {
return nil, err
}
defer database.Close()
return database.SelectMeasuredValues(ctx)
}
return nil, fmt.Errorf("No supported scheme")
}
func Round(measuredValues []*types.MeasuredValue, round float64) {
for _, measuredValue := range measuredValues {
measuredValue.Value = math.Round(measuredValue.Value/round) * round
}
}
// Write measured values to the given storage endpoint url. If the storage
// provider defined to a file, the data will be overwritten. If a database
// provider is used, the data is simply added without deleting the existing
// data. The scheme must be matched to a storage provider, if the scheme is not
// implemented, the function returns an error
func Write(ctx context.Context, measuredValues []*types.MeasuredValue, storageEndpoint *url.URL) error {
writeCreationDate(measuredValues)
switch storageEndpoint.Scheme {
case "file":
measuredValueLogfile := logfile.New(storageEndpoint.Path)
return measuredValueLogfile.Write(measuredValues)
case "postgres":
database, err := db.New(storageEndpoint)
if err != nil {
return err
}
defer database.Close()
return database.InsertMeasuredValues(ctx, measuredValues)
return &Postgres{
dbo: newDBO,
flogger: flogger,
}, nil
default:
return fmt.Errorf("No supported scheme")
}
}
// writeCreationDate assigns the current formatted time as creation date to
// every measured value that does not have a creation date set yet. Values
// with an existing creation date are left untouched.
func writeCreationDate(measuredValues []*types.MeasuredValue) {
	now := format.FormatedTime()
	for _, measuredValue := range measuredValues {
		if measuredValue.CreationDate == nil {
			measuredValue.CreationDate = &now
		}
	}
}

View File

@ -1,286 +0,0 @@
package storage_test
import (
"testing"
"github.com/volker-raschek/flucky/pkg/storage"
"github.com/volker-raschek/flucky/test/goldenfiles"
"github.com/stretchr/testify/require"
)
// testCase describes a single golden-file transformation: the source file is
// read, optionally rounded and compressed, and compared against the content
// of the expected golden file.
type testCase struct {
	source      string  // path to the golden input file
	expected    string  // path to the golden file holding the expected result
	compression bool    // apply storage.Compression to the input values
	round       float64 // rounding factor for storage.Round; 0 disables rounding
}
var (
	// testCases enumerates every golden-file transformation that
	// TestCompressionAndRounding verifies. The list is the full cross
	// product of file formats, transformations and measured value kinds,
	// generated programmatically instead of being spelled out by hand.
	testCases = generateTestCases()
)

// generateTestCases builds the cross product of file formats (csv, json,
// xml), transformations (rounding only, compression only, compression and
// rounding) and measured value kinds. The order matches the former
// hand-written list: grouped by format, then by transformation, then by
// kind.
func generateTestCases() []*testCase {
	// transformation describes how a golden input is post-processed and
	// which expected-file suffix encodes that processing.
	type transformation struct {
		expectedSuffix string
		compression    bool
		round          float64
	}

	formats := []string{"csv", "json", "xml"}
	kinds := []string{"MeasuredValues", "Humidities", "Pressures", "Temperatures"}
	transformations := []transformation{
		{expectedSuffix: "UncompressedRounded", compression: false, round: 0.5},
		{expectedSuffix: "CompressedNotRounded", compression: true, round: 0},
		{expectedSuffix: "CompressedRounded", compression: true, round: 0.5},
	}

	testCases := make([]*testCase, 0, len(formats)*len(transformations)*len(kinds))
	for _, format := range formats {
		for _, trans := range transformations {
			for _, kind := range kinds {
				testCases = append(testCases, &testCase{
					source:      "test/goldenfiles/" + format + "/golden" + kind + "UncompressedNotRounded." + format,
					expected:    "test/goldenfiles/" + format + "/golden" + kind + trans.expectedSuffix + "." + format,
					compression: trans.compression,
					round:       trans.round,
				})
			}
		}
	}
	return testCases
}
// TestCompressionAndRounding reads golden measured values, applies the
// rounding and compression configured by each test case and compares the
// result element by element against the expected golden measured values.
func TestCompressionAndRounding(t *testing.T) {
	require := require.New(t)
	for _, testCase := range testCases {
		measuredValues, err := goldenfiles.GetGoldenMeasuredValues(testCase.source)
		require.NoError(err)

		expectedMeasuredValues, err := goldenfiles.GetGoldenMeasuredValues(testCase.expected)
		require.NoError(err)

		actualMeasuredValues := measuredValues
		if testCase.round != 0 {
			storage.Round(actualMeasuredValues, testCase.round)
		}
		if testCase.compression {
			actualMeasuredValues = storage.Compression(actualMeasuredValues)
		}

		// Guard against an index-out-of-range panic below if compression
		// produced a different number of elements than expected.
		require.Len(actualMeasuredValues, len(expectedMeasuredValues), "Expected and actual measured values differ in length")

		for i := range expectedMeasuredValues {
			require.Equal(expectedMeasuredValues[i].ID, actualMeasuredValues[i].ID, "ID of element %v is not equal between expected and actual", i)
			require.Equal(expectedMeasuredValues[i].Value, actualMeasuredValues[i].Value, "Value of element %v is not equal between expected and actual", i)
			require.Equal(expectedMeasuredValues[i].ValueType, actualMeasuredValues[i].ValueType, "ValueType of element %v is not equal between expected and actual", i)
			require.Equal(expectedMeasuredValues[i].FromDate, actualMeasuredValues[i].FromDate, "FromDate of element %v is not equal between expected and actual", i)
			require.Equal(expectedMeasuredValues[i].TillDate, actualMeasuredValues[i].TillDate, "TillDate of element %v is not equal between expected and actual", i)
			require.Equal(expectedMeasuredValues[i].SensorID, actualMeasuredValues[i].SensorID, "SensorID of element %v is not equal between expected and actual", i)
		}
	}
}