Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 10 additions & 1 deletion crates/core/src/db/relational_db.rs
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@ use spacetimedb_sats::{AlgebraicType, AlgebraicValue, ProductType, ProductValue}
use spacetimedb_schema::def::{ModuleDef, TableDef, ViewDef};
use spacetimedb_schema::reducer_name::ReducerName;
use spacetimedb_schema::schema::{
ColumnSchema, IndexSchema, RowLevelSecuritySchema, Schema, SequenceSchema, TableSchema,
ColumnSchema, ConstraintSchema, IndexSchema, RowLevelSecuritySchema, Schema, SequenceSchema, TableSchema,
};
use spacetimedb_schema::table_name::TableName;
use spacetimedb_snapshot::{ReconstructedSnapshot, SnapshotError, SnapshotRepository};
Expand Down Expand Up @@ -1482,6 +1482,15 @@ impl RelationalDB {
Ok(self.inner.drop_sequence_mut_tx(tx, seq_id)?)
}

/// Creates a constraint, making the corresponding index unique if applicable.
pub fn create_constraint(
&self,
tx: &mut MutTx,
constraint: ConstraintSchema,
) -> Result<ConstraintId, DBError> {
Ok(self.inner.create_constraint_mut_tx(tx, constraint)?)
}

/// Removes the [Constraints] from the database instance.
pub fn drop_constraint(&self, tx: &mut MutTx, constraint_id: ConstraintId) -> Result<(), DBError> {
Ok(self.inner.drop_constraint_mut_tx(tx, constraint_id)?)
Expand Down
19 changes: 19 additions & 0 deletions crates/core/src/db/update.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ use spacetimedb_lib::db::auth::StTableType;
use spacetimedb_lib::identity::AuthCtx;
use spacetimedb_lib::AlgebraicValue;
use spacetimedb_primitives::{ColSet, TableId};
use spacetimedb_schema::schema::ConstraintSchema;
use spacetimedb_schema::auto_migrate::{AutoMigratePlan, ManualMigratePlan, MigratePlan};
use spacetimedb_schema::def::{TableDef, ViewDef};
use spacetimedb_schema::schema::{column_schemas_from_defs, IndexSchema, Schema, SequenceSchema, TableSchema};
Expand Down Expand Up @@ -220,6 +221,24 @@ fn auto_migrate_database(
);
stdb.drop_constraint(tx, constraint_schema.constraint_id)?;
}
spacetimedb_schema::auto_migrate::AutoMigrateStep::AddConstraint(constraint_name) => {
let table_def = plan.new.stored_in_table_def(constraint_name).unwrap();
let constraint_def = &table_def.constraints[constraint_name];
let table_id = stdb.table_id_from_name_mut(tx, &table_def.name)?.unwrap();
let constraint_schema = ConstraintSchema::from_module_def(
plan.new,
constraint_def,
table_id,
spacetimedb_primitives::ConstraintId::SENTINEL,
);
log!(
logger,
"Adding constraint `{}` on table `{}`",
constraint_name,
table_def.name
);
stdb.create_constraint(tx, constraint_schema)?;
}
spacetimedb_schema::auto_migrate::AutoMigrateStep::AddSequence(sequence_name) => {
let table_def = plan.new.stored_in_table_def(sequence_name).unwrap();
let sequence_def = table_def.sequences.get(sequence_name).unwrap();
Expand Down
24 changes: 20 additions & 4 deletions crates/datastore/src/locking_tx_datastore/committed_state.rs
Original file line number Diff line number Diff line change
Expand Up @@ -149,7 +149,7 @@ impl CommittedState {
}

/// Returns the views that perform a full scan of this table
pub(super) fn views_for_table_scan(&self, table_id: &TableId) -> impl Iterator<Item = &ViewCallInfo> + use<'_> {
pub(super) fn views_for_table_scan(&self, table_id: &TableId) -> impl Iterator<Item = &ViewCallInfo> {
self.read_sets.views_for_table_scan(table_id)
}

Expand All @@ -158,7 +158,7 @@ impl CommittedState {
&'a self,
table_id: &TableId,
row_ref: RowRef<'a>,
) -> impl Iterator<Item = &'a ViewCallInfo> + use<'a> {
) -> impl Iterator<Item = &'a ViewCallInfo> {
self.read_sets.views_for_index_seek(table_id, row_ref)
}
}
Expand Down Expand Up @@ -1324,14 +1324,30 @@ impl CommittedState {
.unwrap_or_else(|e| match e {});
}
// A constraint was removed. Add it back.
ConstraintRemoved(table_id, constraint_schema) => {
ConstraintRemoved(table_id, constraint_schema, index_id) => {
let table = self.tables.get_mut(&table_id)?;
table.with_mut_schema(|s| s.update_constraint(constraint_schema));
// If the constraint had a unique index, make it unique again.
if let Some(index_id) = index_id {
if let Some(idx) = table.indexes.get_mut(&index_id) {
idx.make_unique().expect("rollback: index should have no duplicates");
}
}
}
// A constraint was added. Remove it.
ConstraintAdded(table_id, constraint_id) => {
ConstraintAdded(table_id, constraint_id, index_id, pointer_map) => {
let table = self.tables.get_mut(&table_id)?;
table.with_mut_schema(|s| s.remove_constraint(constraint_id));
// If the constraint made an index unique, revert it to non-unique.
if let Some(index_id) = index_id {
if let Some(idx) = table.indexes.get_mut(&index_id) {
idx.make_non_unique();
}
}
// Restore the pointer map if it was taken.
if let Some(pm) = pointer_map {
table.restore_pointer_map(pm);
}
}
// A sequence was removed. Add it back.
SequenceRemoved(table_id, seq, schema) => {
Expand Down
214 changes: 208 additions & 6 deletions crates/datastore/src/locking_tx_datastore/datastore.rs
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ use spacetimedb_sats::{memory_usage::MemoryUsage, Deserialize};
use spacetimedb_schema::table_name::TableName;
use spacetimedb_schema::{
reducer_name::ReducerName,
schema::{ColumnSchema, IndexSchema, SequenceSchema, TableSchema},
schema::{ColumnSchema, ConstraintSchema, IndexSchema, SequenceSchema, TableSchema},
};
use spacetimedb_snapshot::{ReconstructedSnapshot, SnapshotRepository};
use spacetimedb_table::{
Expand Down Expand Up @@ -575,6 +575,14 @@ impl MutTxDatastore for Locking {
tx.sequence_id_from_name(sequence_name)
}

/// Creates `constraint` within `tx`, returning the id it was assigned.
fn create_constraint_mut_tx(&self, tx: &mut Self::MutTx, constraint: ConstraintSchema) -> Result<ConstraintId> {
    tx.create_constraint(constraint)
}

/// Drops the constraint identified by `constraint_id` within `tx`.
fn drop_constraint_mut_tx(
    &self,
    tx: &mut Self::MutTx,
    constraint_id: ConstraintId,
) -> Result<()> {
    tx.drop_constraint(constraint_id)
}
Expand Down Expand Up @@ -1206,13 +1214,14 @@ impl<F: FnMut(u64)> spacetimedb_commitlog::payload::txdata::Visitor for ReplayVi
// TODO: avoid clone
Ok(schema) => schema.table_name.clone(),

Err(_) => match self.dropped_table_names.remove(&table_id) {
Some(name) => name,
_ => {
Err(_) => {
if let Some(name) = self.dropped_table_names.remove(&table_id) {
name
} else {
return self
.process_error(anyhow!("Error looking up name for truncated table {table_id:?}").into());
}
},
}
};

if let Err(e) = self.committed_state.replay_truncate(table_id).with_context(|| {
Expand Down Expand Up @@ -1300,7 +1309,7 @@ mod tests {
use spacetimedb_lib::error::ResultTest;
use spacetimedb_lib::st_var::StVarValue;
use spacetimedb_lib::{resolved_type_via_v9, ScheduleAt, TimeDuration};
use spacetimedb_primitives::{col_list, ArgId, ColId, ScheduleId, ViewId};
use spacetimedb_primitives::{col_list, ArgId, ColId, ColSet, ScheduleId, ViewId};
use spacetimedb_sats::algebraic_value::ser::value_serialize;
use spacetimedb_sats::bsatn::ToBsatn;
use spacetimedb_sats::layout::RowTypeLayout;
Expand Down Expand Up @@ -3975,4 +3984,197 @@ mod tests {
);
Ok(())
}

/// Test helper: a table schema whose only index is a *non-unique*
/// btree over column `col_pos`, with no constraints attached.
fn table_with_non_unique_index(col_pos: u16) -> TableSchema {
    let index = IndexSchema::for_test("Foo_idx_btree", BTreeAlgorithm::from(col_pos));
    basic_table_schema_with_indices(vec![index], Vec::<ConstraintSchema>::new())
}

/// Test helper: a table schema whose only index is a *non-unique*
/// btree over the columns in `cols`, with no constraints attached.
fn table_with_non_unique_multi_col_index(cols: impl Into<ColList>) -> TableSchema {
    let algorithm = BTreeAlgorithm { columns: cols.into() };
    let index = IndexSchema::for_test("Foo_multi_idx_btree", algorithm);
    basic_table_schema_with_indices(vec![index], Vec::<ConstraintSchema>::new())
}

#[test]
fn test_create_constraint_makes_index_unique() -> ResultTest<()> {
    let db = get_datastore()?;

    // First transaction: a table whose col 0 carries a non-unique btree index.
    let mut tx = begin_mut_tx(&db);
    let table_id = db.create_table_mut_tx(&mut tx, table_with_non_unique_index(0))?;
    commit(&db, tx)?;

    // Second transaction: commit two rows with distinct ids.
    let mut tx = begin_mut_tx(&db);
    insert(&db, &mut tx, table_id, &u32_str_u32(1, "Alice", 30))?;
    insert(&db, &mut tx, table_id, &u32_str_u32(2, "Bob", 25))?;
    commit(&db, tx)?;

    // Third transaction: attach a unique constraint on col 0. All committed
    // rows are distinct on that column, so this must succeed.
    let mut tx = begin_mut_tx(&db);
    let mut cs = ConstraintSchema::unique_for_test("Foo_id_unique", 0u16);
    cs.table_id = table_id;
    db.create_constraint_mut_tx(&mut tx, cs)?;

    // The backing index is now unique, so a duplicate id must be rejected.
    let dup_result = insert(&db, &mut tx, table_id, &u32_str_u32(1, "Charlie", 20));
    assert!(dup_result.is_err(), "duplicate insert should fail after adding unique constraint");
    commit(&db, tx)?;

    Ok(())
}

#[test]
fn test_create_constraint_rollback_restores_non_unique() -> ResultTest<()> {
    let db = get_datastore()?;

    // Set up a table whose col 0 carries a non-unique btree index.
    let mut tx = begin_mut_tx(&db);
    let table_id = db.create_table_mut_tx(&mut tx, table_with_non_unique_index(0))?;
    commit(&db, tx)?;

    // Commit two rows with distinct ids.
    let mut tx = begin_mut_tx(&db);
    insert(&db, &mut tx, table_id, &u32_str_u32(1, "Alice", 30))?;
    insert(&db, &mut tx, table_id, &u32_str_u32(2, "Bob", 25))?;
    commit(&db, tx)?;

    // Add a unique constraint on col 0, then abandon the transaction.
    let mut tx = begin_mut_tx(&db);
    let mut cs = ConstraintSchema::unique_for_test("Foo_id_unique", 0u16);
    cs.table_id = table_id;
    db.create_constraint_mut_tx(&mut tx, cs)?;
    let _ = db.rollback_mut_tx(tx);

    // The rollback must have reverted the index to non-unique,
    // so a duplicate id is accepted again.
    let mut tx = begin_mut_tx(&db);
    let result = insert(&db, &mut tx, table_id, &u32_str_u32(1, "Charlie", 20));
    assert!(result.is_ok(), "duplicate insert should succeed after rollback of unique constraint");
    Ok(())
}

#[test]
fn test_create_constraint_fails_with_duplicates() -> ResultTest<()> {
    let db = get_datastore()?;

    // Table whose col 0 carries a non-unique btree index.
    let mut tx = begin_mut_tx(&db);
    let table_id = db.create_table_mut_tx(&mut tx, table_with_non_unique_index(0))?;
    commit(&db, tx)?;

    // Commit two rows sharing the same id.
    let mut tx = begin_mut_tx(&db);
    insert(&db, &mut tx, table_id, &u32_str_u32(1, "Alice", 30))?;
    insert(&db, &mut tx, table_id, &u32_str_u32(1, "Bob", 25))?; // duplicate id=1
    commit(&db, tx)?;

    // With duplicates already present, a unique constraint on col 0 must be rejected.
    let mut tx = begin_mut_tx(&db);
    let mut cs = ConstraintSchema::unique_for_test("Foo_id_unique", 0u16);
    cs.table_id = table_id;
    let result = db.create_constraint_mut_tx(&mut tx, cs);
    assert!(result.is_err(), "create_constraint should fail when duplicates exist");

    Ok(())
}

#[test]
fn test_create_constraint_multi_col() -> ResultTest<()> {
    let db = get_datastore()?;

    // Table with a non-unique btree index over (col 0, col 2).
    let mut tx = begin_mut_tx(&db);
    let schema = table_with_non_unique_multi_col_index(col_list![0, 2]);
    let table_id = db.create_table_mut_tx(&mut tx, schema)?;
    commit(&db, tx)?;

    // Commit rows that collide on id but differ on age, so (id, age) stays unique.
    let mut tx = begin_mut_tx(&db);
    insert(&db, &mut tx, table_id, &u32_str_u32(1, "Alice", 30))?;
    insert(&db, &mut tx, table_id, &u32_str_u32(1, "Bob", 25))?; // same id, different age
    commit(&db, tx)?;

    // A unique constraint over (col 0, col 2) is satisfied by the data above,
    // so it must be accepted.
    let mut tx = begin_mut_tx(&db);
    let mut cs = ConstraintSchema::unique_for_test("Foo_id_age_unique", ColSet::from(col_list![0, 2]));
    cs.table_id = table_id;
    db.create_constraint_mut_tx(&mut tx, cs)?;
    commit(&db, tx)?;

    Ok(())
}

#[test]
fn test_drop_constraint_makes_index_non_unique() -> ResultTest<()> {
    let db = get_datastore()?;

    // Table created with its usual unique constraint in place.
    let mut tx = begin_mut_tx(&db);
    let schema = basic_table_schema_with_indices(basic_indices(), basic_constraints());
    let table_id = db.create_table_mut_tx(&mut tx, schema)?;
    commit(&db, tx)?;

    // Commit a single row.
    let mut tx = begin_mut_tx(&db);
    insert(&db, &mut tx, table_id, &u32_str_u32(1, "Alice", 30))?;
    commit(&db, tx)?;

    // Drop the unique constraint on col 0...
    let mut tx = begin_mut_tx(&db);
    let constraint_id = tx
        .constraint_id_from_name("Foo_id_key")?
        .expect("constraint should exist");
    db.drop_constraint_mut_tx(&mut tx, constraint_id)?;

    // ...after which a duplicate id is allowed.
    let result = insert(&db, &mut tx, table_id, &u32_str_u32(1, "Bob", 25));
    assert!(result.is_ok(), "duplicate insert should succeed after dropping unique constraint");
    commit(&db, tx)?;

    Ok(())
}

#[test]
fn test_drop_constraint_rollback_keeps_unique() -> ResultTest<()> {
    let db = get_datastore()?;

    // Table created with its usual unique constraint in place.
    let mut tx = begin_mut_tx(&db);
    let schema = basic_table_schema_with_indices(basic_indices(), basic_constraints());
    let table_id = db.create_table_mut_tx(&mut tx, schema)?;
    commit(&db, tx)?;

    // Commit a single row.
    let mut tx = begin_mut_tx(&db);
    insert(&db, &mut tx, table_id, &u32_str_u32(1, "Alice", 30))?;
    commit(&db, tx)?;

    // Drop the constraint, then abandon the transaction.
    let mut tx = begin_mut_tx(&db);
    let constraint_id = tx
        .constraint_id_from_name("Foo_id_key")?
        .expect("constraint should exist");
    db.drop_constraint_mut_tx(&mut tx, constraint_id)?;
    let _ = db.rollback_mut_tx(tx);

    // The rollback must have restored uniqueness, so a duplicate id is rejected.
    let mut tx = begin_mut_tx(&db);
    let dup_result = insert(&db, &mut tx, table_id, &u32_str_u32(1, "Bob", 25));
    assert!(dup_result.is_err(), "duplicate insert should fail after rollback of drop constraint");
    Ok(())
}
}
Loading