+307 -61 +/-11 browse
1 | diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml |
2 | index 086823d..4958e63 100644 |
3 | --- a/.github/workflows/test.yaml |
4 | +++ b/.github/workflows/test.yaml |
5 | @@ -76,16 +76,20 @@ jobs: |
6 | - name: cargo test |
7 | if: success() || failure() # always run even if other steps fail, except when cancelled <https://stackoverflow.com/questions/58858429/how-to-run-a-github-actions-step-even-if-the-previous-step-fails-while-still-f> |
8 | run: | |
9 | - cargo test --all --no-fail-fast --all-features |
10 | + cargo test --all --no-fail-fast --all-features |
11 | - name: cargo-sort |
12 | - if: success() || failure() # always run even if other steps fail, except when cancelled <https://stackoverflow.com/questions/58858429/how-to-run-a-github-actions-step-even-if-the-previous-step-fails-while-still-f> |
13 | + if: success() || failure() |
14 | run: | |
15 | cargo sort --check |
16 | - name: rustfmt |
17 | - if: success() || failure() # always run even if other steps fail, except when cancelled <https://stackoverflow.com/questions/58858429/how-to-run-a-github-actions-step-even-if-the-previous-step-fails-while-still-f> |
18 | + if: success() || failure() |
19 | run: | |
20 | cargo fmt --check --all |
21 | - name: clippy |
22 | - if: success() || failure() # always run even if other steps fail, except when cancelled <https://stackoverflow.com/questions/58858429/how-to-run-a-github-actions-step-even-if-the-previous-step-fails-while-still-f> |
23 | + if: success() || failure() |
24 | run: | |
25 | cargo clippy --no-deps --all-features --all --tests --examples --benches --bins |
26 | + - name: rustdoc |
27 | + if: success() || failure() |
28 | + run: | |
29 | + make rustdoc |
30 | diff --git a/core/build.rs b/core/build.rs |
31 | index 77a3159..23a9bcd 100644 |
32 | --- a/core/build.rs |
33 | +++ b/core/build.rs |
34 | @@ -18,45 +18,141 @@ |
35 | */ |
36 | |
37 | use std::{ |
38 | + fs::{metadata, read_dir, OpenOptions}, |
39 | + io, |
40 | io::Write, |
41 | + path::Path, |
42 | process::{Command, Stdio}, |
43 | }; |
44 | |
45 | + // Source: https://stackoverflow.com/a/64535181 |
46 | + fn is_output_file_outdated<P1, P2>(input: P1, output: P2) -> io::Result<bool> |
47 | + where |
48 | + P1: AsRef<Path>, |
49 | + P2: AsRef<Path>, |
50 | + { |
51 | + let out_meta = metadata(output); |
52 | + if let Ok(meta) = out_meta { |
53 | + let output_mtime = meta.modified()?; |
54 | + |
55 | + // if input file is more recent than our output, we are outdated |
56 | + let input_meta = metadata(input)?; |
57 | + let input_mtime = input_meta.modified()?; |
58 | + |
59 | + Ok(input_mtime > output_mtime) |
60 | + } else { |
61 | + // output file not found, we are outdated |
62 | + Ok(true) |
63 | + } |
64 | + } |
65 | + |
66 | fn main() { |
67 | + println!("cargo:rerun-if-changed=migrations"); |
68 | println!("cargo:rerun-if-changed=src/schema.sql.m4"); |
69 | |
70 | - let output = Command::new("m4") |
71 | - .arg("./src/schema.sql.m4") |
72 | - .output() |
73 | - .unwrap(); |
74 | - if String::from_utf8_lossy(&output.stdout).trim().is_empty() { |
75 | - panic!( |
76 | - "m4 output is empty. stderr was {}", |
77 | - String::from_utf8_lossy(&output.stderr) |
78 | + if is_output_file_outdated("src/schema.sql.m4", "src/schema.sql").unwrap() { |
79 | + let output = Command::new("m4") |
80 | + .arg("./src/schema.sql.m4") |
81 | + .output() |
82 | + .unwrap(); |
83 | + if String::from_utf8_lossy(&output.stdout).trim().is_empty() { |
84 | + panic!( |
85 | + "m4 output is empty. stderr was {}", |
86 | + String::from_utf8_lossy(&output.stderr) |
87 | + ); |
88 | + } |
89 | + let mut verify = Command::new("sqlite3") |
90 | + .stdin(Stdio::piped()) |
91 | + .stdout(Stdio::piped()) |
92 | + .stderr(Stdio::piped()) |
93 | + .spawn() |
94 | + .unwrap(); |
95 | + println!( |
96 | + "Verifying by creating an in-memory database in sqlite3 and feeding it the output \ |
97 | + schema." |
98 | ); |
99 | + verify |
100 | + .stdin |
101 | + .take() |
102 | + .unwrap() |
103 | + .write_all(&output.stdout) |
104 | + .unwrap(); |
105 | + let exit = verify.wait_with_output().unwrap(); |
106 | + if !exit.status.success() { |
107 | + panic!( |
108 | + "sqlite3 could not read SQL schema: {}", |
109 | + String::from_utf8_lossy(&exit.stdout) |
110 | + ); |
111 | + } |
112 | + let mut file = std::fs::File::create("./src/schema.sql").unwrap(); |
113 | + file.write_all(&output.stdout).unwrap(); |
114 | } |
115 | - let mut verify = Command::new("sqlite3") |
116 | - .stdin(Stdio::piped()) |
117 | - .stdout(Stdio::piped()) |
118 | - .stderr(Stdio::piped()) |
119 | - .spawn() |
120 | - .unwrap(); |
121 | - println!( |
122 | - "Verifying by creating an in-memory database in sqlite3 and feeding it the output schema." |
123 | - ); |
124 | - verify |
125 | - .stdin |
126 | - .take() |
127 | - .unwrap() |
128 | - .write_all(&output.stdout) |
129 | - .unwrap(); |
130 | - let exit = verify.wait_with_output().unwrap(); |
131 | - if !exit.status.success() { |
132 | - panic!( |
133 | - "sqlite3 could not read SQL schema: {}", |
134 | - String::from_utf8_lossy(&exit.stdout) |
135 | - ); |
136 | + |
137 | + const MIGRATION_RS: &str = "src/migrations.rs.inc"; |
138 | + |
139 | + let mut regen = false; |
140 | + let mut paths = vec![]; |
141 | + let mut undo_paths = vec![]; |
142 | + for entry in read_dir("migrations").unwrap() { |
143 | + let entry = entry.unwrap(); |
144 | + let path = entry.path(); |
145 | + if path.is_dir() || path.extension().map(|os| os.to_str().unwrap()) != Some("sql") { |
146 | + continue; |
147 | + } |
148 | + if is_output_file_outdated(&path, MIGRATION_RS).unwrap() { |
149 | + regen = true; |
150 | + } |
151 | + if path |
152 | + .file_name() |
153 | + .unwrap() |
154 | + .to_str() |
155 | + .unwrap() |
156 | + .ends_with("undo.sql") |
157 | + { |
158 | + undo_paths.push(path); |
159 | + } else { |
160 | + paths.push(path); |
161 | + } |
162 | + } |
163 | + |
164 | + if regen { |
165 | + paths.sort(); |
166 | + undo_paths.sort(); |
167 | + let mut migr_rs = OpenOptions::new() |
168 | + .write(true) |
169 | + .create(true) |
170 | + .truncate(true) |
171 | + .open(MIGRATION_RS) |
172 | + .unwrap(); |
173 | + migr_rs |
174 | .write_all(b"\n//(user_version, redo sql, undo sql)\n&[") |
175 | + .unwrap(); |
176 | + for (p, u) in paths.iter().zip(undo_paths.iter()) { |
177 | + // This should be a number string, padded with 2 zeros if it's less than 3 |
178 | + // digits. e.g. 001, \d{3} |
179 | + let num = p.file_stem().unwrap().to_str().unwrap(); |
180 | + if !u.file_name().unwrap().to_str().unwrap().starts_with(num) { |
181 | + panic!("Undo file {u:?} should match with {p:?}"); |
182 | + } |
183 | + if num.parse::<u32>().is_err() { |
184 | + panic!("Migration file {p:?} should start with a number"); |
185 | + } |
186 | + migr_rs.write_all(b"(").unwrap(); |
187 | + migr_rs |
188 | + .write_all(num.trim_start_matches('0').as_bytes()) |
189 | + .unwrap(); |
190 | + migr_rs.write_all(b",\"").unwrap(); |
191 | + |
192 | + migr_rs |
193 | + .write_all(std::fs::read_to_string(p).unwrap().as_bytes()) |
194 | + .unwrap(); |
195 | + migr_rs.write_all(b"\",\"").unwrap(); |
196 | + migr_rs |
197 | + .write_all(std::fs::read_to_string(u).unwrap().as_bytes()) |
198 | + .unwrap(); |
199 | + migr_rs.write_all(b"\"),").unwrap(); |
200 | + } |
201 | + migr_rs.write_all(b"]").unwrap(); |
202 | + migr_rs.flush().unwrap(); |
203 | } |
204 | - let mut file = std::fs::File::create("./src/schema.sql").unwrap(); |
205 | - file.write_all(&output.stdout).unwrap(); |
206 | } |
207 | diff --git a/core/migrations/001.sql b/core/migrations/001.sql |
208 | new file mode 100644 |
209 | index 0000000..a62617c |
210 | --- /dev/null |
211 | +++ b/core/migrations/001.sql |
212 | @@ -0,0 +1,4 @@ |
213 | + PRAGMA foreign_keys=ON; |
214 | + BEGIN; |
215 | + ALTER TABLE templates RENAME TO template; |
216 | + COMMIT; |
217 | diff --git a/core/migrations/001.undo.sql b/core/migrations/001.undo.sql |
218 | new file mode 100644 |
219 | index 0000000..86fe8ac |
220 | --- /dev/null |
221 | +++ b/core/migrations/001.undo.sql |
222 | @@ -0,0 +1,4 @@ |
223 | + PRAGMA foreign_keys=ON; |
224 | + BEGIN; |
225 | + ALTER TABLE template RENAME TO templates; |
226 | + COMMIT; |
227 | diff --git a/core/src/connection.rs b/core/src/connection.rs |
228 | index ce550ef..235e878 100644 |
229 | --- a/core/src/connection.rs |
230 | +++ b/core/src/connection.rs |
231 | @@ -91,7 +91,7 @@ fn user_authorizer_callback( |
232 | table_name: "post" | "queue" | "candidate_subscription" | "subscription" | "account", |
233 | } |
234 | | AuthAction::Update { |
235 | - table_name: "candidate_subscription" | "templates", |
236 | + table_name: "candidate_subscription" | "template", |
237 | column_name: "accepted" | "last_modified" | "verified" | "address", |
238 | } |
239 | | AuthAction::Update { |
240 | @@ -129,6 +129,10 @@ impl Connection { |
241 | /// ``` |
242 | pub const SCHEMA: &str = include_str!("./schema.sql"); |
243 | |
244 | + /// Database migrations. |
245 | + pub const MIGRATIONS: &[(u32, &str, &str)] = |
246 | + include!("./migrations.rs.inc"); |
247 | + |
248 | /// Creates a new database connection. |
249 | /// |
250 | /// `Connection` supports a limited subset of operations by default (see |
251 | @@ -159,11 +163,68 @@ impl Connection { |
252 | conn.set_db_config(DbConfig::SQLITE_DBCONFIG_TRUSTED_SCHEMA, false)?; |
253 | conn.busy_timeout(core::time::Duration::from_millis(500))?; |
254 | conn.busy_handler(Some(|times: i32| -> bool { times < 5 }))?; |
255 | - conn.authorizer(Some(user_authorizer_callback)); |
256 | - Ok(Self { |
257 | + |
258 | + let mut ret = Self { |
259 | conf, |
260 | connection: conn, |
261 | - }) |
262 | + }; |
263 | + if let Some(&(latest, _, _)) = Self::MIGRATIONS.last() { |
264 | + let version = ret.schema_version()?; |
265 | + trace!( |
266 | + "SQLITE user_version PRAGMA returned {version}. Most recent migration is {latest}." |
267 | + ); |
268 | + if version < latest { |
269 | + info!("Updating database schema from version {version} to {latest}..."); |
270 | + } |
271 | + ret.migrate(version, latest)?; |
272 | + } |
273 | + |
274 | + ret.connection.authorizer(Some(user_authorizer_callback)); |
275 | + Ok(ret) |
276 | + } |
277 | + |
278 | + /// The version of the current schema. |
279 | + pub fn schema_version(&self) -> Result<u32> { |
280 | + Ok(self |
281 | + .connection |
282 | + .prepare("SELECT user_version FROM pragma_user_version;")? |
283 | + .query_row([], |row| { |
284 | + let v: u32 = row.get(0)?; |
285 | + Ok(v) |
286 | + })?) |
287 | + } |
288 | + |
289 | + /// Migrate from version `from` to `to`. |
290 | + /// |
291 | + /// See [Self::MIGRATIONS]. |
292 | + pub fn migrate(&mut self, mut from: u32, to: u32) -> Result<()> { |
293 | + if from == to { |
294 | + return Ok(()); |
295 | + } |
296 | + |
297 | + let undo = from > to; |
298 | + let tx = self.connection.transaction()?; |
299 | + |
300 | + while from != to { |
301 | + log::trace!( |
302 | + "exec migration from {from} to {to}, type: {}do", |
303 | + if undo { "un" } else { "re" } |
304 | + ); |
305 | + if undo { |
306 | + trace!("{}", Self::MIGRATIONS[from as usize - 1].2); |
307 | + tx.execute_batch(Self::MIGRATIONS[from as usize - 1].2)?; |
308 | + from -= 1; |
309 | + } else { |
310 | + trace!("{}", Self::MIGRATIONS[from as usize].1); |
311 | + tx.execute_batch(Self::MIGRATIONS[from as usize].1)?; |
312 | + from += 1; |
313 | + } |
314 | + } |
315 | + tx.pragma_update(None, "user_version", to)?; |
316 | + |
317 | + tx.commit()?; |
318 | + |
319 | + Ok(()) |
320 | } |
321 | |
322 | /// Removes operational limits from this connection. (see |
323 | @@ -211,8 +272,22 @@ impl Connection { |
324 | let mut stdin = child.stdin.take().unwrap(); |
325 | std::thread::spawn(move || { |
326 | stdin |
327 | - .write_all(include_bytes!("./schema.sql")) |
328 | + .write_all(Self::SCHEMA.as_bytes()) |
329 | .expect("failed to write to stdin"); |
330 | + if !Self::MIGRATIONS.is_empty() { |
331 | + stdin |
332 | + .write_all(b"\nPRAGMA user_version = ") |
333 | + .expect("failed to write to stdin"); |
334 | + stdin |
335 | + .write_all( |
336 | + Self::MIGRATIONS[Self::MIGRATIONS.len() - 1] |
337 | + .0 |
338 | + .to_string() |
339 | + .as_bytes(), |
340 | + ) |
341 | + .expect("failed to write to stdin"); |
342 | + stdin.write_all(b";").expect("failed to write to stdin"); |
343 | + } |
344 | stdin.flush().expect("could not flush stdin"); |
345 | }); |
346 | let output = child.wait_with_output()?; |
347 | diff --git a/core/src/mail.rs b/core/src/mail.rs |
348 | index 612261f..c482c38 100644 |
349 | --- a/core/src/mail.rs |
350 | +++ b/core/src/mail.rs |
351 | @@ -17,8 +17,9 @@ |
352 | * along with this program. If not, see <https://www.gnu.org/licenses/>. |
353 | */ |
354 | |
355 | - //! Types for processing new posts: [`PostFilter`](message_filters::PostFilter), |
356 | - //! [`ListContext`], [`MailJob`] and [`PostAction`]. |
357 | + //! Types for processing new posts: |
358 | + //! [`PostFilter`](crate::message_filters::PostFilter), [`ListContext`], |
359 | + //! [`MailJob`] and [`PostAction`]. |
360 | |
361 | use log::trace; |
362 | use melib::Address; |
363 | @@ -28,7 +29,7 @@ use crate::{ |
364 | DbVal, |
365 | }; |
366 | /// Post action returned from a list's |
367 | - /// [`PostFilter`](message_filters::PostFilter) stack. |
368 | + /// [`PostFilter`](crate::message_filters::PostFilter) stack. |
369 | #[derive(Debug)] |
370 | pub enum PostAction { |
371 | /// Add to `hold` queue. |
372 | @@ -47,8 +48,8 @@ pub enum PostAction { |
373 | }, |
374 | } |
375 | |
376 | - /// List context passed to a list's [`PostFilter`](message_filters::PostFilter) |
377 | - /// stack. |
378 | + /// List context passed to a list's |
379 | + /// [`PostFilter`](crate::message_filters::PostFilter) stack. |
380 | #[derive(Debug)] |
381 | pub struct ListContext<'list> { |
382 | /// Which mailing list a post was addressed to. |
383 | @@ -62,12 +63,12 @@ pub struct ListContext<'list> { |
384 | /// The mailing list subscription policy. |
385 | pub subscription_policy: Option<DbVal<SubscriptionPolicy>>, |
386 | /// The scheduled jobs added by each filter in a list's |
387 | - /// [`PostFilter`](message_filters::PostFilter) stack. |
388 | + /// [`PostFilter`](crate::message_filters::PostFilter) stack. |
389 | pub scheduled_jobs: Vec<MailJob>, |
390 | } |
391 | |
392 | /// Post to be considered by the list's |
393 | - /// [`PostFilter`](message_filters::PostFilter) stack. |
394 | + /// [`PostFilter`](crate::message_filters::PostFilter) stack. |
395 | pub struct PostEntry { |
396 | /// `From` address of post. |
397 | pub from: Address, |
398 | @@ -76,7 +77,7 @@ pub struct PostEntry { |
399 | /// `To` addresses of post. |
400 | pub to: Vec<Address>, |
401 | /// Final action set by each filter in a list's |
402 | - /// [`PostFilter`](message_filters::PostFilter) stack. |
403 | + /// [`PostFilter`](crate::message_filters::PostFilter) stack. |
404 | pub action: PostAction, |
405 | } |
406 | |
407 | @@ -92,7 +93,7 @@ impl core::fmt::Debug for PostEntry { |
408 | } |
409 | |
410 | /// Scheduled jobs added to a [`ListContext`] by a list's |
411 | - /// [`PostFilter`](message_filters::PostFilter) stack. |
412 | + /// [`PostFilter`](crate::message_filters::PostFilter) stack. |
413 | #[derive(Debug)] |
414 | pub enum MailJob { |
415 | /// Send post to recipients. |
416 | diff --git a/core/src/migrations.rs.inc b/core/src/migrations.rs.inc |
417 | new file mode 100644 |
418 | index 0000000..b6ad33e |
419 | --- /dev/null |
420 | +++ b/core/src/migrations.rs.inc |
421 | @@ -0,0 +1,11 @@ |
422 | + |
423 | + //(user_version, redo sql, undo sql) |
424 | + &[(1,"PRAGMA foreign_keys=ON; |
425 | + BEGIN; |
426 | + ALTER TABLE templates RENAME TO template; |
427 | + COMMIT; |
428 | + ","PRAGMA foreign_keys=ON; |
429 | + BEGIN; |
430 | + ALTER TABLE template RENAME TO templates; |
431 | + COMMIT; |
432 | + "),] |
433 | \ No newline at end of file |
434 | diff --git a/core/src/schema.sql b/core/src/schema.sql |
435 | index 9c5cf75..30654a6 100644 |
436 | --- a/core/src/schema.sql |
437 | +++ b/core/src/schema.sql |
438 | @@ -256,7 +256,7 @@ CREATE TABLE IF NOT EXISTS post ( |
439 | created INTEGER NOT NULL DEFAULT (unixepoch()) |
440 | ); |
441 | |
442 | - CREATE TABLE IF NOT EXISTS templates ( |
443 | + CREATE TABLE IF NOT EXISTS template ( |
444 | pk INTEGER PRIMARY KEY NOT NULL, |
445 | name TEXT NOT NULL, |
446 | list INTEGER, |
447 | @@ -457,13 +457,13 @@ BEGIN |
448 | WHERE pk = NEW.pk; |
449 | END; |
450 | |
451 | - -- [tag:last_modified_templates]: update last_modified on every change. |
452 | + -- [tag:last_modified_template]: update last_modified on every change. |
453 | CREATE TRIGGER |
454 | - IF NOT EXISTS last_modified_templates |
455 | - AFTER UPDATE ON templates |
456 | + IF NOT EXISTS last_modified_template |
457 | + AFTER UPDATE ON template |
458 | FOR EACH ROW |
459 | WHEN NEW.last_modified != OLD.last_modified |
460 | BEGIN |
461 | - UPDATE templates SET last_modified = unixepoch() |
462 | + UPDATE template SET last_modified = unixepoch() |
463 | WHERE pk = NEW.pk; |
464 | END; |
465 | diff --git a/core/src/schema.sql.m4 b/core/src/schema.sql.m4 |
466 | index 3d0fa1f..0f1cee6 100644 |
467 | --- a/core/src/schema.sql.m4 |
468 | +++ b/core/src/schema.sql.m4 |
469 | @@ -158,7 +158,7 @@ CREATE TABLE IF NOT EXISTS post ( |
470 | created INTEGER NOT NULL DEFAULT (unixepoch()) |
471 | ); |
472 | |
473 | - CREATE TABLE IF NOT EXISTS templates ( |
474 | + CREATE TABLE IF NOT EXISTS template ( |
475 | pk INTEGER PRIMARY KEY NOT NULL, |
476 | name TEXT NOT NULL, |
477 | list INTEGER, |
478 | @@ -288,4 +288,4 @@ update_last_modified(`subscription_policy') |
479 | update_last_modified(`subscription') |
480 | update_last_modified(`account') |
481 | update_last_modified(`candidate_subscription') |
482 | - update_last_modified(`templates') |
483 | + update_last_modified(`template') |
484 | diff --git a/core/src/templates.rs b/core/src/templates.rs |
485 | index 8617b46..31b9b24 100644 |
486 | --- a/core/src/templates.rs |
487 | +++ b/core/src/templates.rs |
488 | @@ -216,7 +216,7 @@ impl Connection { |
489 | pub fn fetch_templates(&self) -> Result<Vec<DbVal<Template>>> { |
490 | let mut stmt = self |
491 | .connection |
492 | - .prepare("SELECT * FROM templates ORDER BY pk;")?; |
493 | + .prepare("SELECT * FROM template ORDER BY pk;")?; |
494 | let iter = stmt.query_map(rusqlite::params![], |row| { |
495 | let pk = row.get("pk")?; |
496 | Ok(DbVal( |
497 | @@ -248,7 +248,7 @@ impl Connection { |
498 | ) -> Result<Option<DbVal<Template>>> { |
499 | let mut stmt = self |
500 | .connection |
501 | - .prepare("SELECT * FROM templates WHERE name = ? AND list IS ?;")?; |
502 | + .prepare("SELECT * FROM template WHERE name = ? AND list IS ?;")?; |
503 | let ret = stmt |
504 | .query_row(rusqlite::params![&template, &list_pk], |row| { |
505 | let pk = row.get("pk")?; |
506 | @@ -268,7 +268,7 @@ impl Connection { |
507 | if ret.is_none() && list_pk.is_some() { |
508 | let mut stmt = self |
509 | .connection |
510 | - .prepare("SELECT * FROM templates WHERE name = ? AND list IS NULL;")?; |
511 | + .prepare("SELECT * FROM template WHERE name = ? AND list IS NULL;")?; |
512 | Ok(stmt |
513 | .query_row(rusqlite::params![&template], |row| { |
514 | let pk = row.get("pk")?; |
515 | @@ -293,7 +293,7 @@ impl Connection { |
516 | /// Insert a named template. |
517 | pub fn add_template(&self, template: Template) -> Result<DbVal<Template>> { |
518 | let mut stmt = self.connection.prepare( |
519 | - "INSERT INTO templates(name, list, subject, headers_json, body) VALUES(?, ?, ?, ?, ?) \ |
520 | + "INSERT INTO template(name, list, subject, headers_json, body) VALUES(?, ?, ?, ?, ?) \ |
521 | RETURNING *;", |
522 | )?; |
523 | let ret = stmt |
524 | @@ -345,7 +345,7 @@ impl Connection { |
525 | pub fn remove_template(&self, template: &str, list_pk: Option<i64>) -> Result<Template> { |
526 | let mut stmt = self |
527 | .connection |
528 | - .prepare("DELETE FROM templates WHERE name = ? AND list IS ? RETURNING *;")?; |
529 | + .prepare("DELETE FROM template WHERE name = ? AND list IS ? RETURNING *;")?; |
530 | let ret = stmt.query_row(rusqlite::params![&template, &list_pk], |row| { |
531 | Ok(Template { |
532 | pk: -1, |
533 | diff --git a/core/tests/migrations.rs b/core/tests/migrations.rs |
534 | new file mode 100644 |
535 | index 0000000..84c6448 |
536 | --- /dev/null |
537 | +++ b/core/tests/migrations.rs |
538 | @@ -0,0 +1,51 @@ |
539 | + /* |
540 | + * This file is part of mailpot |
541 | + * |
542 | + * Copyright 2020 - Manos Pitsidianakis |
543 | + * |
544 | + * This program is free software: you can redistribute it and/or modify |
545 | + * it under the terms of the GNU Affero General Public License as |
546 | + * published by the Free Software Foundation, either version 3 of the |
547 | + * License, or (at your option) any later version. |
548 | + * |
549 | + * This program is distributed in the hope that it will be useful, |
550 | + * but WITHOUT ANY WARRANTY; without even the implied warranty of |
551 | + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
552 | + * GNU Affero General Public License for more details. |
553 | + * |
554 | + * You should have received a copy of the GNU Affero General Public License |
555 | + * along with this program. If not, see <https://www.gnu.org/licenses/>. |
556 | + */ |
557 | + |
558 | + use mailpot::{Configuration, Connection, SendMail}; |
559 | + use mailpot_tests::init_stderr_logging; |
560 | + use tempfile::TempDir; |
561 | + |
562 | + #[test] |
563 | + fn test_init_empty() { |
564 | + init_stderr_logging(); |
565 | + let tmp_dir = TempDir::new().unwrap(); |
566 | + |
567 | + let db_path = tmp_dir.path().join("mpot.db"); |
568 | + let config = Configuration { |
569 | + send_mail: SendMail::ShellCommand("/usr/bin/false".to_string()), |
570 | + db_path, |
571 | + data_path: tmp_dir.path().to_path_buf(), |
572 | + administrators: vec![], |
573 | + }; |
574 | + |
575 | + let mut db = Connection::open_or_create_db(config).unwrap().trusted(); |
576 | + |
577 | + let migrations = Connection::MIGRATIONS; |
578 | + if migrations.is_empty() { |
579 | + return; |
580 | + } |
581 | + |
582 | + let version = db.schema_version().unwrap(); |
583 | + |
584 | + assert_eq!(version, migrations[migrations.len() - 1].0); |
585 | + |
586 | + db.migrate(version, migrations[0].0).unwrap(); |
587 | + |
588 | + db.migrate(migrations[0].0, version).unwrap(); |
589 | + } |