2016-10-20 17:00:05 +08:00
|
|
|
/*
|
2019-06-24 22:01:18 +08:00
|
|
|
Copyright © 2015-2019 by The qTox Project Contributors
|
2016-10-20 17:00:05 +08:00
|
|
|
|
|
|
|
This file is part of qTox, a Qt-based graphical interface for Tox.
|
|
|
|
|
|
|
|
qTox is libre software: you can redistribute it and/or modify
|
|
|
|
it under the terms of the GNU General Public License as published by
|
|
|
|
the Free Software Foundation, either version 3 of the License, or
|
|
|
|
(at your option) any later version.
|
|
|
|
|
|
|
|
qTox is distributed in the hope that it will be useful,
|
|
|
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
GNU General Public License for more details.
|
|
|
|
|
|
|
|
You should have received a copy of the GNU General Public License
|
|
|
|
along with qTox. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
*/
|
|
|
|
|
2015-12-17 18:24:01 +08:00
|
|
|
#include <QDebug>
|
|
|
|
#include <cassert>
|
|
|
|
|
2016-10-20 17:00:05 +08:00
|
|
|
#include "history.h"
|
|
|
|
#include "profile.h"
|
|
|
|
#include "settings.h"
|
2017-02-26 19:52:45 +08:00
|
|
|
#include "db/rawdatabase.h"
|
2015-12-17 18:24:01 +08:00
|
|
|
|
2019-05-01 15:50:19 +08:00
|
|
|
namespace {
|
2019-10-11 14:13:26 +08:00
|
|
|
static constexpr int SCHEMA_VERSION = 3;
|
2019-05-01 15:50:19 +08:00
|
|
|
|
2019-10-07 15:08:01 +08:00
|
|
|
// Creates every table of a brand-new database at the current SCHEMA_VERSION
// and stamps the version via PRAGMA user_version.
// Returns true only if all queries executed successfully.
bool createCurrentSchema(RawDatabase& db)
{
    QVector<RawDatabase::Query> queries;
    queries += RawDatabase::Query(QStringLiteral(
        "CREATE TABLE peers (id INTEGER PRIMARY KEY, "
        "public_key TEXT NOT NULL UNIQUE);"
        "CREATE TABLE aliases (id INTEGER PRIMARY KEY, "
        "owner INTEGER, "
        "display_name BLOB NOT NULL, "
        "UNIQUE(owner, display_name));"
        "CREATE TABLE history "
        "(id INTEGER PRIMARY KEY, "
        "timestamp INTEGER NOT NULL, "
        "chat_id INTEGER NOT NULL, "
        "sender_alias INTEGER NOT NULL, "
        // even though technically a message can be null for file transfer, we've opted
        // to just insert an empty string when there's no content, this moderately simplifies
        // implementation as currently our database doesn't have support for optional
        // fields. We would either have to insert "?" or "null" based on if message exists and then
        // ensure that our blob vector always has the right number of fields. Better to just
        // leave this as NOT NULL for now.
        "message BLOB NOT NULL, "
        "file_id INTEGER);"
        "CREATE TABLE file_transfers "
        "(id INTEGER PRIMARY KEY, "
        "chat_id INTEGER NOT NULL, "
        "file_restart_id BLOB NOT NULL, "
        "file_name BLOB NOT NULL, "
        "file_path BLOB NOT NULL, "
        "file_hash BLOB NOT NULL, "
        "file_size INTEGER NOT NULL, "
        "direction INTEGER NOT NULL, "
        "file_state INTEGER NOT NULL);"
        "CREATE TABLE faux_offline_pending (id INTEGER PRIMARY KEY);"
        "CREATE TABLE broken_messages (id INTEGER PRIMARY KEY);"));
    // Record the schema version so future startups know no upgrade is needed.
    queries += RawDatabase::Query(QStringLiteral("PRAGMA user_version = %1;").arg(SCHEMA_VERSION));
    return db.execNow(queries);
}
|
|
|
|
|
2019-10-07 15:08:01 +08:00
|
|
|
bool isNewDb(std::shared_ptr<RawDatabase>& db, bool& success)
|
2019-05-01 15:50:19 +08:00
|
|
|
{
|
|
|
|
bool newDb;
|
|
|
|
if (!db->execNow(RawDatabase::Query("SELECT COUNT(*) FROM sqlite_master;",
|
|
|
|
[&](const QVector<QVariant>& row) {
|
|
|
|
newDb = row[0].toLongLong() == 0;
|
|
|
|
}))) {
|
|
|
|
db.reset();
|
2019-10-07 15:08:01 +08:00
|
|
|
success = false;
|
|
|
|
return false;
|
2019-05-01 15:50:19 +08:00
|
|
|
}
|
2019-10-07 15:08:01 +08:00
|
|
|
success = true;
|
2019-05-01 15:50:19 +08:00
|
|
|
return newDb;
|
|
|
|
}
|
|
|
|
|
2019-10-07 15:08:01 +08:00
|
|
|
// Upgrades the schema from version 0 to 1: adds the file_transfers table
// and links history rows to it through a new file_id column.
bool dbSchema0to1(RawDatabase& db)
{
    QVector<RawDatabase::Query> queries;
    queries +=
        RawDatabase::Query(QStringLiteral(
            "CREATE TABLE file_transfers "
            "(id INTEGER PRIMARY KEY, "
            "chat_id INTEGER NOT NULL, "
            "file_restart_id BLOB NOT NULL, "
            "file_name BLOB NOT NULL, "
            "file_path BLOB NOT NULL, "
            "file_hash BLOB NOT NULL, "
            "file_size INTEGER NOT NULL, "
            "direction INTEGER NOT NULL, "
            "file_state INTEGER NOT NULL);"));
    queries +=
        RawDatabase::Query(QStringLiteral("ALTER TABLE history ADD file_id INTEGER;"));
    queries += RawDatabase::Query(QStringLiteral("PRAGMA user_version = 1;"));
    return db.execNow(queries);
}
|
|
|
|
|
fix(history): move stuck pending message into broken_messages table
Fix #5776
Due to a long standing bug, faux offline message have been able to become stuck
going back years. Because of recent fixes to history loading, faux offline
messages will correctly all be sent on connection, but this causes an issue of
long stuck messages suddenly being delivered to a friend, out of context,
creating a confusing interaction. To work around this, this upgrade moves any
faux offline messages in a chat that are older than the last successfully
delivered message, indicating they were stuck, to a new table,
`broken_messages`, preventing them from ever being sent in the future.
2019-10-06 16:23:24 +08:00
|
|
|
// Upgrades the schema from version 1 to 2: moves long-stuck faux offline
// messages into the new broken_messages table so they are never re-sent.
bool dbSchema1to2(RawDatabase& db)
{
    // Any faux_offline_pending message, in a chat that has newer delivered
    // message is decided to be broken. It must be moved from
    // faux_offline_pending to broken_messages

    // the last non-pending message in each chat
    QString lastDeliveredQuery = QString(
        "SELECT chat_id, MAX(history.id) FROM "
        "history JOIN peers chat ON chat_id = chat.id "
        "LEFT JOIN faux_offline_pending ON history.id = faux_offline_pending.id "
        "WHERE faux_offline_pending.id IS NULL "
        "GROUP BY chat_id;");

    QVector<RawDatabase::Query> upgradeQueries;
    upgradeQueries +=
        RawDatabase::Query(QStringLiteral(
            "CREATE TABLE broken_messages "
            "(id INTEGER PRIMARY KEY);"));

    // For every chat, queue a query that copies its stuck pending messages
    // (those older than the last delivered message) into broken_messages.
    auto rowCallback = [&upgradeQueries](const QVector<QVariant>& row) {
        auto chatId = row[0].toLongLong();
        auto lastDeliveredHistoryId = row[1].toLongLong();

        upgradeQueries += QString("INSERT INTO broken_messages "
                                  "SELECT faux_offline_pending.id FROM "
                                  "history JOIN faux_offline_pending "
                                  "ON faux_offline_pending.id = history.id "
                                  "WHERE history.chat_id=%1 "
                                  "AND history.id < %2;").arg(chatId).arg(lastDeliveredHistoryId);
    };
    // note this doesn't modify the db, just generate new queries, so is safe
    // to run outside of our upgrade transaction
    if (!db.execNow({lastDeliveredQuery, rowCallback})) {
        return false;
    }

    // Remove the now-broken entries from the pending table so they are
    // never retried on connect.
    upgradeQueries += QString(
        "DELETE FROM faux_offline_pending "
        "WHERE id in ("
        "SELECT id FROM broken_messages);");

    upgradeQueries += RawDatabase::Query(QStringLiteral("PRAGMA user_version = 2;"));

    // All mutating queries run together in one transaction.
    return db.execNow(upgradeQueries);
}
|
|
|
|
|
2019-10-11 14:13:26 +08:00
|
|
|
// Upgrades the schema from version 2 to 3: quarantines empty "/me " action
// messages, which can never be sent successfully.
bool dbSchema2to3(RawDatabase& db)
{
    // Any faux_offline_pending message with the content "/me " are action
    // messages that qTox previously let a user enter, but that will cause an
    // action type message to be sent to toxcore, with 0 length, which will
    // always fail. They must be moved from faux_offline_pending to broken_messages
    // to avoid qTox from erroring trying to send them on every connect

    const QString emptyActionMessageString = "/me ";

    QVector<RawDatabase::Query> upgradeQueries;
    upgradeQueries += RawDatabase::Query{QString("INSERT INTO broken_messages "
                                                 "SELECT faux_offline_pending.id FROM "
                                                 "history JOIN faux_offline_pending "
                                                 "ON faux_offline_pending.id = history.id "
                                                 "WHERE history.message = ?;"),
                                         {emptyActionMessageString.toUtf8()}};

    // Remove the quarantined entries from the pending table.
    upgradeQueries += QString(
        "DELETE FROM faux_offline_pending "
        "WHERE id in ("
        "SELECT id FROM broken_messages);");

    upgradeQueries += RawDatabase::Query(QStringLiteral("PRAGMA user_version = 3;"));

    return db.execNow(upgradeQueries);
}
|
|
|
|
|
2019-05-01 15:50:19 +08:00
|
|
|
/**
|
|
|
|
* @brief Upgrade the db schema
|
|
|
|
* @note On future alterations of the database all you have to do is bump the SCHEMA_VERSION
|
|
|
|
* variable and add another case to the switch statement below. Make sure to fall through on each case.
|
|
|
|
*/
|
2019-10-07 15:08:01 +08:00
|
|
|
void dbSchemaUpgrade(std::shared_ptr<RawDatabase>& db)
|
2019-05-01 15:50:19 +08:00
|
|
|
{
|
|
|
|
int64_t databaseSchemaVersion;
|
|
|
|
|
|
|
|
if (!db->execNow(RawDatabase::Query("PRAGMA user_version", [&](const QVector<QVariant>& row) {
|
|
|
|
databaseSchemaVersion = row[0].toLongLong();
|
|
|
|
}))) {
|
|
|
|
qCritical() << "History failed to read user_version";
|
|
|
|
db.reset();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (databaseSchemaVersion > SCHEMA_VERSION) {
|
|
|
|
qWarning() << "Database version is newer than we currently support. Please upgrade qTox";
|
|
|
|
// We don't know what future versions have done, we have to disable db access until we re-upgrade
|
|
|
|
db.reset();
|
|
|
|
return;
|
|
|
|
} else if (databaseSchemaVersion == SCHEMA_VERSION) {
|
|
|
|
// No work to do
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (databaseSchemaVersion) {
|
2019-10-07 15:08:01 +08:00
|
|
|
case 0: {
|
2019-05-01 15:50:19 +08:00
|
|
|
// Note: 0 is a special version that is actually two versions.
|
|
|
|
// possibility 1) it is a newly created database and it neesds the current schema to be created.
|
|
|
|
// possibility 2) it is a old existing database, before version 1 and before we saved schema version,
|
2019-10-07 10:09:42 +08:00
|
|
|
// and needs to be updated.
|
2019-10-07 15:08:01 +08:00
|
|
|
bool success = false;
|
|
|
|
const bool newDb = isNewDb(db, success);
|
|
|
|
if (!success) {
|
|
|
|
qCritical() << "Failed to create current db schema";
|
|
|
|
db.reset();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (newDb) {
|
|
|
|
if (!createCurrentSchema(*db)) {
|
|
|
|
qCritical() << "Failed to create current db schema";
|
|
|
|
db.reset();
|
|
|
|
return;
|
|
|
|
}
|
2019-05-01 15:50:19 +08:00
|
|
|
qDebug() << "Database created at schema version" << SCHEMA_VERSION;
|
|
|
|
break; // new db is the only case where we don't incrementally upgrade through each version
|
|
|
|
} else {
|
2019-10-07 15:08:01 +08:00
|
|
|
if (!dbSchema0to1(*db)) {
|
|
|
|
qCritical() << "Failed to upgrade db to schema version 1, aborting";
|
|
|
|
db.reset();
|
|
|
|
return;
|
|
|
|
}
|
2019-10-07 10:09:42 +08:00
|
|
|
qDebug() << "Database upgraded incrementally to schema version 1";
|
2019-05-01 15:50:19 +08:00
|
|
|
}
|
2019-10-07 15:08:01 +08:00
|
|
|
}
|
2019-05-01 15:50:19 +08:00
|
|
|
// fallthrough
|
fix(history): move stuck pending message into broken_messages table
Fix #5776
Due to a long standing bug, faux offline message have been able to become stuck
going back years. Because of recent fixes to history loading, faux offline
messages will correctly all be sent on connection, but this causes an issue of
long stuck messages suddenly being delivered to a friend, out of context,
creating a confusing interaction. To work around this, this upgrade moves any
faux offline messages in a chat that are older than the last successfully
delivered message, indicating they were stuck, to a new table,
`broken_messages`, preventing them from ever being sent in the future.
2019-10-06 16:23:24 +08:00
|
|
|
case 1:
|
|
|
|
if (!dbSchema1to2(*db)) {
|
|
|
|
qCritical() << "Failed to upgrade db to schema version 2, aborting";
|
|
|
|
db.reset();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
qDebug() << "Database upgraded incrementally to schema version 2";
|
|
|
|
//fallthrough
|
2019-10-11 14:13:26 +08:00
|
|
|
case 2:
|
|
|
|
if (!dbSchema2to3(*db)) {
|
|
|
|
qCritical() << "Failed to upgrade db to schema version 3, aborting";
|
|
|
|
db.reset();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
qDebug() << "Database upgraded incrementally to schema version 3";
|
2019-05-01 15:50:19 +08:00
|
|
|
// etc.
|
|
|
|
default:
|
2019-10-07 10:09:42 +08:00
|
|
|
qInfo() << "Database upgrade finished (databaseSchemaVersion" << databaseSchemaVersion
|
2019-05-01 15:50:19 +08:00
|
|
|
<< "->" << SCHEMA_VERSION << ")";
|
|
|
|
}
|
|
|
|
}
|
2019-10-08 12:41:54 +08:00
|
|
|
|
|
|
|
// Maps the two pending/broken flags onto the MessageState enum.
// The two flags are mutually exclusive; a message can't be both.
MessageState getMessageState(bool isPending, bool isBroken)
{
    assert(!(isPending && isBroken));
    if (isPending) {
        return MessageState::pending;
    }
    if (isBroken) {
        return MessageState::broken;
    }
    return MessageState::complete;
}
|
2019-05-01 15:50:19 +08:00
|
|
|
} // namespace
|
|
|
|
|
2016-07-27 06:20:54 +08:00
|
|
|
/**
|
2016-08-01 16:21:23 +08:00
|
|
|
* @class History
|
|
|
|
* @brief Interacts with the profile database to save the chat history.
|
|
|
|
*
|
|
|
|
* @var QHash<QString, int64_t> History::peers
|
|
|
|
* @brief Maps friend public keys to unique IDs by index.
|
|
|
|
* Caches mappings to speed up message saving.
|
|
|
|
*/
|
2016-07-27 06:20:54 +08:00
|
|
|
|
2018-09-24 16:05:43 +08:00
|
|
|
static constexpr int NUM_MESSAGES_DEFAULT =
|
|
|
|
100; // arbitrary number of messages loaded when not loading by date
|
|
|
|
|
|
|
|
FileDbInsertionData::FileDbInsertionData()
{
    // Register the type with Qt's meta-object system exactly once so the
    // struct can travel through queued signal/slot connections.
    static int id = qRegisterMetaType<FileDbInsertionData>();
    (void)id;
}
|
2018-04-24 07:51:26 +08:00
|
|
|
|
2016-07-27 06:20:54 +08:00
|
|
|
/**
 * @brief Prepares the database to work with the history.
 * @param db This database will be prepared for use with the history.
 */
History::History(std::shared_ptr<RawDatabase> db)
    : db(db)
{
    if (!isValid()) {
        qWarning() << "Database not open, init failed";
        return;
    }

    dbSchemaUpgrade(db);

    // dbSchemaUpgrade may have put us in an invalid state
    if (!isValid()) {
        return;
    }

    // File insertion is completed asynchronously via these queued signals.
    connect(this, &History::fileInsertionReady, this, &History::onFileInsertionReady);
    connect(this, &History::fileInserted, this, &History::onFileInserted);

    // Cache our current peers
    db->execLater(RawDatabase::Query{"SELECT public_key, id FROM peers;",
                                     [this](const QVector<QVariant>& row) {
                                         peers[row[0].toString()] = row[1].toInt();
                                     }});
}
|
|
|
|
|
|
|
|
History::~History()
{
    if (!isValid()) {
        return;
    }

    // We could have execLater requests pending with a lambda attached,
    // so clear the pending transactions first
    db->sync();
}
|
|
|
|
|
2016-07-27 06:20:54 +08:00
|
|
|
/**
|
2016-08-01 16:21:23 +08:00
|
|
|
* @brief Checks if the database was opened successfully
|
|
|
|
* @return True if database if opened, false otherwise.
|
|
|
|
*/
|
2015-12-17 18:24:01 +08:00
|
|
|
bool History::isValid()
|
|
|
|
{
|
2016-10-20 17:00:05 +08:00
|
|
|
return db && db->isOpen();
|
2015-12-19 23:51:15 +08:00
|
|
|
}
|
|
|
|
|
2018-10-05 10:24:39 +08:00
|
|
|
/**
 * @brief Checks if a friend has chat history
 * @param friendPk Public key of the friend to check.
 * @return True if has, false otherwise.
 */
bool History::historyExists(const ToxPk& friendPk)
{
    // A friend has history iff at least one message can be loaded for them.
    return !getMessagesForFriend(friendPk, 0, 1).empty();
}
|
|
|
|
|
2016-07-27 06:20:54 +08:00
|
|
|
/**
 * @brief Erases all the chat history from the database.
 */
void History::eraseHistory()
{
    if (!isValid()) {
        return;
    }

    // Wipe every table, then VACUUM to reclaim the file space on disk.
    db->execNow("DELETE FROM faux_offline_pending;"
                "DELETE FROM history;"
                "DELETE FROM aliases;"
                "DELETE FROM peers;"
                "DELETE FROM file_transfers;"
                "DELETE FROM broken_messages;"
                "VACUUM;");
}
|
|
|
|
|
2016-07-27 06:20:54 +08:00
|
|
|
/**
 * @brief Erases the chat history with one friend.
 * @param friendPk Friend public key to erase.
 */
void History::removeFriendHistory(const QString& friendPk)
{
    if (!isValid()) {
        return;
    }

    // Unknown peer means there is nothing stored for them.
    if (!peers.contains(friendPk)) {
        return;
    }

    int64_t id = peers[friendPk];

    // Delete the pending/broken bookkeeping rows first (they reference
    // history by id), then the history itself, aliases, the peer row,
    // and any file transfers; finally VACUUM to reclaim disk space.
    QString queryText = QString("DELETE FROM faux_offline_pending "
                                "WHERE faux_offline_pending.id IN ( "
                                " SELECT faux_offline_pending.id FROM faux_offline_pending "
                                " LEFT JOIN history ON faux_offline_pending.id = history.id "
                                " WHERE chat_id=%1 "
                                "); "
                                "DELETE FROM broken_messages "
                                "WHERE broken_messages.id IN ( "
                                " SELECT broken_messages.id FROM broken_messages "
                                " LEFT JOIN history ON broken_messages.id = history.id "
                                " WHERE chat_id=%1 "
                                "); "
                                "DELETE FROM history WHERE chat_id=%1; "
                                "DELETE FROM aliases WHERE owner=%1; "
                                "DELETE FROM peers WHERE id=%1; "
                                "DELETE FROM file_transfers WHERE chat_id=%1;"
                                "VACUUM;")
                            .arg(id);

    if (db->execNow(queryText)) {
        // Keep the in-memory peer cache consistent with the database.
        peers.remove(friendPk);
    } else {
        qWarning() << "Failed to remove friend's history";
    }
}
|
|
|
|
|
2016-07-27 06:20:54 +08:00
|
|
|
/**
 * @brief Generate query to insert new message in database
 * @param friendPk Friend public key to save.
 * @param message Message to save.
 * @param sender Sender to save.
 * @param time Time of message sending.
 * @param isDelivered True if message was already delivered.
 * @param dispName Name, which should be displayed.
 * @param insertIdCallback Function, called after query execution.
 * @return Ordered list of queries; they must run in sequence because later
 *         queries rely on last_insert_rowid() from earlier ones.
 */
QVector<RawDatabase::Query>
History::generateNewMessageQueries(const QString& friendPk, const QString& message,
                                   const QString& sender, const QDateTime& time, bool isDelivered,
                                   QString dispName, std::function<void(RowId)> insertIdCallback)
{
    QVector<RawDatabase::Query> queries;

    // Get the db id of the peer we're chatting with
    int64_t peerId;
    if (peers.contains(friendPk)) {
        peerId = (peers)[friendPk];
    } else {
        // New peer: allocate the next free id (max existing + 1).
        if (peers.isEmpty()) {
            peerId = 0;
        } else {
            peerId = *std::max_element(peers.begin(), peers.end()) + 1;
        }

        (peers)[friendPk] = peerId;
        queries += RawDatabase::Query(("INSERT INTO peers (id, public_key) "
                                       "VALUES (%1, '"
                                       + friendPk + "');")
                                          .arg(peerId));
    }

    // Get the db id of the sender of the message
    int64_t senderId;
    if (peers.contains(sender)) {
        senderId = (peers)[sender];
    } else {
        if (peers.isEmpty()) {
            senderId = 0;
        } else {
            senderId = *std::max_element(peers.begin(), peers.end()) + 1;
        }

        (peers)[sender] = senderId;
        queries += RawDatabase::Query{("INSERT INTO peers (id, public_key) "
                                       "VALUES (%1, '"
                                       + sender + "');")
                                          .arg(senderId)};
    }

    queries += RawDatabase::Query(
        QString("INSERT OR IGNORE INTO aliases (owner, display_name) VALUES (%1, ?);").arg(senderId),
        {dispName.toUtf8()});

    // If the alias already existed, the insert will ignore the conflict and last_insert_rowid()
    // will return garbage,
    // so we have to check changes() and manually fetch the row ID in this case
    queries +=
        RawDatabase::Query(QString(
                               "INSERT INTO history (timestamp, chat_id, message, sender_alias) "
                               "VALUES (%1, %2, ?, ("
                               " CASE WHEN changes() IS 0 THEN ("
                               " SELECT id FROM aliases WHERE owner=%3 AND display_name=?)"
                               " ELSE last_insert_rowid() END"
                               "));")
                               .arg(time.toMSecsSinceEpoch())
                               .arg(peerId)
                               .arg(senderId),
                           {message.toUtf8(), dispName.toUtf8()}, insertIdCallback);

    if (!isDelivered) {
        // Track undelivered messages so they can be re-sent on reconnect.
        queries += RawDatabase::Query{"INSERT INTO faux_offline_pending (id) VALUES ("
                                      " last_insert_rowid()"
                                      ");"};
    }

    return queries;
}
|
|
|
|
|
2018-09-24 16:05:43 +08:00
|
|
|
// Inserts the file_transfers row for a newly announced transfer and links
// the previously inserted history row to it. Runs on this object's thread
// via a queued connection from addNewFileMessage.
void History::onFileInsertionReady(FileDbInsertionData data)
{
    QVector<RawDatabase::Query> queries;
    std::weak_ptr<History> weakThis = shared_from_this();

    // peerId is guaranteed to be inserted since we just used it in addNewMessage
    auto peerId = peers[data.friendPk];
    // Copy to pass into lambda for later
    auto fileId = data.fileId;
    queries +=
        RawDatabase::Query(QStringLiteral(
                               "INSERT INTO file_transfers (chat_id, file_restart_id, "
                               "file_path, file_name, file_hash, file_size, direction, file_state) "
                               "VALUES (%1, ?, ?, ?, ?, %2, %3, %4);")
                               .arg(peerId)
                               .arg(data.size)
                               .arg(static_cast<int>(data.direction))
                               .arg(ToxFile::CANCELED),
                           {data.fileId.toUtf8(), data.filePath.toUtf8(), data.fileName.toUtf8(), QByteArray()},
                           // Weak capture: History may be destroyed before the
                           // deferred query's callback fires.
                           [weakThis, fileId](RowId id) {
                               auto pThis = weakThis.lock();
                               if (pThis) {
                                   emit pThis->fileInserted(id, fileId);
                               }
                           });

    // Link the history row to the file_transfers row just inserted above.
    queries += RawDatabase::Query(QStringLiteral("UPDATE history "
                                                 "SET file_id = (last_insert_rowid()) "
                                                 "WHERE id = %1")
                                      .arg(data.historyId.get()));

    db->execLater(queries);
}
|
|
|
|
|
2019-01-27 19:52:19 +08:00
|
|
|
// Called once the file_transfers row exists. If the transfer already
// finished while the insert was pending, finalize it now; otherwise record
// the row id so setFileFinished can update it later.
void History::onFileInserted(RowId dbId, QString fileId)
{
    auto& fileInfo = fileInfos[fileId];
    if (fileInfo.finished) {
        db->execLater(
            generateFileFinished(dbId, fileInfo.success, fileInfo.filePath, fileInfo.fileHash));
        fileInfos.remove(fileId);
    } else {
        fileInfo.finished = false;
        fileInfo.fileId = dbId;
    }
}
|
|
|
|
|
2019-01-27 19:52:19 +08:00
|
|
|
/**
 * @brief Builds the query that marks a file transfer row as finished.
 * @param id Database row id of the file transfer.
 * @param success True if the transfer completed, false if it was canceled.
 * @param filePath Final path of the file; may be empty when unknown.
 * @param fileHash Hash of the transferred file's content.
 * @return Query updating the matching file_transfers row.
 */
RawDatabase::Query History::generateFileFinished(RowId id, bool success, const QString& filePath,
                                                 const QByteArray& fileHash)
{
    auto file_state = success ? ToxFile::FINISHED : ToxFile::CANCELED;
    if (filePath.length()) {
        // Bug fix: the concatenated literal previously read
        // "file_hash = ?WHERE id = %2" (missing space), which is invalid SQL.
        return RawDatabase::Query(QStringLiteral("UPDATE file_transfers "
                                                 "SET file_state = %1, file_path = ?, file_hash = ? "
                                                 "WHERE id = %2")
                                      .arg(file_state)
                                      .arg(id.get()),
                                  {filePath.toUtf8(), fileHash});
    } else {
        // Bug fix: this branch updated a non-existent "finished" column; the
        // schema (see createCurrentSchema/dbSchema0to1) declares "file_state".
        return RawDatabase::Query(QStringLiteral("UPDATE file_transfers "
                                                 "SET file_state = %1 "
                                                 "WHERE id = %2")
                                      .arg(file_state)
                                      .arg(id.get()));
    }
}
|
|
|
|
|
2018-09-24 16:05:43 +08:00
|
|
|
// Records a file-transfer message: inserts an empty history message first,
// then (via chained callbacks and queued signals) inserts the matching
// file_transfers row and links the two.
void History::addNewFileMessage(const QString& friendPk, const QString& fileId,
                                const QString& fileName, const QString& filePath, int64_t size,
                                const QString& sender, const QDateTime& time, QString const& dispName)
{
    // This is an incredibly far from an optimal way of implementing this,
    // but given the frequency that people are going to be initiating a file
    // transfer we can probably live with it.

    // Since both inserting an alias for a user and inserting a file transfer
    // will generate new ids, there is no good way to inject both new ids into the
    // history query without refactoring our RawDatabase::Query and processor loops.

    // What we will do instead is chain callbacks to try to get reasonable behavior.
    // We can call the generateNewMessageQueries() fn to insert a message with an empty
    // message in it, and get the id with the callback. Once we have the id we can amend
    // the data to have our newly inserted file_id as well

    // A file sent by the friend is being received; anything else is outgoing.
    ToxFile::FileDirection direction;
    if (sender == friendPk) {
        direction = ToxFile::RECEIVING;
    } else {
        direction = ToxFile::SENDING;
    }

    std::weak_ptr<History> weakThis = shared_from_this();
    FileDbInsertionData insertionData;
    insertionData.friendPk = friendPk;
    insertionData.fileId = fileId;
    insertionData.fileName = fileName;
    insertionData.filePath = filePath;
    insertionData.size = size;
    insertionData.direction = direction;

    // Once the history row id is known, hand off to onFileInsertionReady via
    // the queued fileInsertionReady signal. Weak capture: History may be
    // destroyed before the deferred query completes.
    auto insertFileTransferFn = [weakThis, insertionData](RowId messageId) {
        auto insertionDataRw = std::move(insertionData);

        insertionDataRw.historyId = messageId;

        auto thisPtr = weakThis.lock();
        if (thisPtr)
            emit thisPtr->fileInsertionReady(std::move(insertionDataRw));
    };

    // Empty message body; isDelivered=true because a file message is not a
    // faux-offline text message.
    addNewMessage(friendPk, "", sender, time, true, dispName, insertFileTransferFn);
}
|
|
|
|
|
2016-07-27 06:20:54 +08:00
|
|
|
/**
 * @brief Saves a chat message in the database.
 * @param friendPk Friend public key to save.
 * @param message Message to save.
 * @param sender Sender to save.
 * @param time Time of message sending.
 * @param isDelivered True if message was already delivered.
 * @param dispName Name, which should be displayed.
 * @param insertIdCallback Function, called after query execution.
 */
void History::addNewMessage(const QString& friendPk, const QString& message, const QString& sender,
                            const QDateTime& time, bool isDelivered, QString dispName,
                            const std::function<void(RowId)>& insertIdCallback)
{
    // Respect the user's privacy setting: never persist while logging is off.
    if (!Settings::getInstance().getEnableLogging()) {
        qWarning() << "Blocked a message from being added to database while history is disabled";
        return;
    }
    if (!isValid()) {
        return;
    }

    db->execLater(generateNewMessageQueries(friendPk, message, sender, time, isDelivered, dispName,
                                            insertIdCallback));
}
|
|
|
|
|
2018-12-06 12:50:29 +08:00
|
|
|
void History::setFileFinished(const QString& fileId, bool success, const QString& filePath,
|
|
|
|
const QByteArray& fileHash)
|
2018-09-29 17:28:17 +08:00
|
|
|
{
|
|
|
|
auto& fileInfo = fileInfos[fileId];
|
2019-01-27 19:52:19 +08:00
|
|
|
if (fileInfo.fileId.get() == -1) {
|
2018-09-29 17:28:17 +08:00
|
|
|
fileInfo.finished = true;
|
|
|
|
fileInfo.success = success;
|
|
|
|
fileInfo.filePath = filePath;
|
2018-11-30 17:40:46 +08:00
|
|
|
fileInfo.fileHash = fileHash;
|
2018-09-29 17:28:17 +08:00
|
|
|
} else {
|
2018-11-30 17:40:46 +08:00
|
|
|
db->execLater(generateFileFinished(fileInfo.fileId, success, filePath, fileHash));
|
2018-09-29 17:28:17 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
fileInfos.remove(fileId);
|
|
|
|
}
|
2019-06-18 09:06:28 +08:00
|
|
|
|
|
|
|
size_t History::getNumMessagesForFriend(const ToxPk& friendPk)
|
|
|
|
{
|
2019-06-23 18:20:48 +08:00
|
|
|
return getNumMessagesForFriendBeforeDate(friendPk, QDateTime());
|
2019-06-18 09:06:28 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
size_t History::getNumMessagesForFriendBeforeDate(const ToxPk& friendPk, const QDateTime& date)
|
|
|
|
{
|
|
|
|
QString queryText = QString("SELECT COUNT(history.id) "
|
|
|
|
"FROM history "
|
|
|
|
"JOIN peers chat ON chat_id = chat.id "
|
2019-06-23 18:20:48 +08:00
|
|
|
"WHERE chat.public_key='%1'")
|
|
|
|
.arg(friendPk.toString());
|
|
|
|
|
|
|
|
if (date.isNull()) {
|
|
|
|
queryText += ";";
|
|
|
|
} else {
|
|
|
|
queryText += QString(" AND timestamp < %1;").arg(date.toMSecsSinceEpoch());
|
|
|
|
}
|
2019-06-18 09:06:28 +08:00
|
|
|
|
|
|
|
size_t numMessages = 0;
|
|
|
|
auto rowCallback = [&numMessages](const QVector<QVariant>& row) {
|
|
|
|
numMessages = row[0].toLongLong();
|
|
|
|
};
|
|
|
|
|
|
|
|
db->execNow({queryText, rowCallback});
|
|
|
|
|
|
|
|
return numMessages;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * @brief Fetches a contiguous slice of chat history with friendPk.
 * @param friendPk Public key of the friend whose history to fetch.
 * @param firstIdx Conversation-relative index of the first message to return.
 * @param lastIdx One past the conversation-relative index of the last message.
 * @return List of history messages; rows joined to a file transfer are
 *         returned as ToxFile entries, plain rows as text messages.
 */
QList<History::HistMessage> History::getMessagesForFriend(const ToxPk& friendPk, size_t firstIdx,
                                                          size_t lastIdx)
{
    QList<HistMessage> messages;

    // Don't forget to update the rowCallback if you change the selected columns!
    QString queryText =
        QString("SELECT history.id, faux_offline_pending.id, timestamp, "
                "chat.public_key, aliases.display_name, sender.public_key, "
                "message, file_transfers.file_restart_id, "
                "file_transfers.file_path, file_transfers.file_name, "
                "file_transfers.file_size, file_transfers.direction, "
                "file_transfers.file_state, broken_messages.id FROM history "
                "LEFT JOIN faux_offline_pending ON history.id = faux_offline_pending.id "
                "JOIN peers chat ON history.chat_id = chat.id "
                "JOIN aliases ON sender_alias = aliases.id "
                "JOIN peers sender ON aliases.owner = sender.id "
                "LEFT JOIN file_transfers ON history.file_id = file_transfers.id "
                "LEFT JOIN broken_messages ON history.id = broken_messages.id "
                "WHERE chat.public_key='%1' "
                "LIMIT %2 OFFSET %3;")
            .arg(friendPk.toString())
            .arg(lastIdx - firstIdx)
            .arg(firstIdx);

    auto rowCallback = [&messages](const QVector<QVariant>& row) {
        // dispName and message could have null bytes, QString::fromUtf8
        // truncates on null bytes so we strip them
        auto id = RowId{row[0].toLongLong()};
        // LEFT JOIN on faux_offline_pending: non-null means still undelivered.
        auto isPending = !row[1].isNull();
        auto timestamp = QDateTime::fromMSecsSinceEpoch(row[2].toLongLong());
        auto friend_key = row[3].toString();
        auto display_name = QString::fromUtf8(row[4].toByteArray().replace('\0', ""));
        auto sender_key = row[5].toString();
        // LEFT JOIN on broken_messages: non-null marks a known-broken message.
        auto isBroken = !row[13].isNull();

        MessageState messageState = getMessageState(isPending, isBroken);

        // row[7] (file_restart_id) is null for plain text rows; non-null means
        // the history row is joined to a file transfer.
        if (row[7].isNull()) {
            messages += {id, messageState, timestamp, friend_key,
                         display_name, sender_key, row[6].toString()};
        } else {
            ToxFile file;
            file.fileKind = TOX_FILE_KIND_DATA;
            file.resumeFileId = row[7].toString().toUtf8();
            file.filePath = row[8].toString();
            file.fileName = row[9].toString();
            file.filesize = row[10].toLongLong();
            file.direction = static_cast<ToxFile::FileDirection>(row[11].toLongLong());
            file.status = static_cast<ToxFile::FileStatus>(row[12].toInt());
            messages +=
                {id, messageState, timestamp, friend_key, display_name, sender_key, file};
        }
    };

    db->execNow({queryText, rowCallback});

    return messages;
}
|
|
|
|
|
2019-10-08 11:58:55 +08:00
|
|
|
/**
 * @brief Fetches all messages for friendPk that are still awaiting delivery,
 * i.e. those with a matching row in the faux_offline_pending table.
 * @param friendPk Public key of the friend whose pending messages to fetch.
 * @return List of undelivered (text) history messages.
 */
QList<History::HistMessage> History::getUndeliveredMessagesForFriend(const ToxPk& friendPk)
{
    auto queryText =
        QString("SELECT history.id, faux_offline_pending.id, timestamp, chat.public_key, "
                "aliases.display_name, sender.public_key, message, broken_messages.id "
                "FROM history "
                "JOIN faux_offline_pending ON history.id = faux_offline_pending.id "
                "JOIN peers chat on history.chat_id = chat.id "
                "JOIN aliases on sender_alias = aliases.id "
                "JOIN peers sender on aliases.owner = sender.id "
                "LEFT JOIN broken_messages ON history.id = broken_messages.id "
                "WHERE chat.public_key='%1';")
            .arg(friendPk.toString());

    QList<History::HistMessage> ret;
    auto rowCallback = [&ret](const QVector<QVariant>& row) {
        // dispName and message could have null bytes, QString::fromUtf8
        // truncates on null bytes so we strip them
        auto id = RowId{row[0].toLongLong()};
        // Inner JOIN on faux_offline_pending above, so this is non-null for
        // every returned row; kept for symmetry with getMessagesForFriend.
        auto isPending = !row[1].isNull();
        auto timestamp = QDateTime::fromMSecsSinceEpoch(row[2].toLongLong());
        auto friend_key = row[3].toString();
        auto display_name = QString::fromUtf8(row[4].toByteArray().replace('\0', ""));
        auto sender_key = row[5].toString();
        // LEFT JOIN on broken_messages: non-null marks a known-broken message.
        auto isBroken = !row[7].isNull();

        MessageState messageState = getMessageState(isPending, isBroken);

        ret += {id, messageState, timestamp, friend_key,
                display_name, sender_key, row[6].toString()};
    };

    db->execNow({queryText, rowCallback});

    return ret;
}
|
|
|
|
|
2018-06-30 00:58:28 +08:00
|
|
|
/**
|
|
|
|
* @brief Search phrase in chat messages
|
|
|
|
* @param friendPk Friend public key
|
|
|
|
* @param from a date message where need to start a search
|
|
|
|
* @param phrase what need to find
|
|
|
|
* @param parameter for search
|
|
|
|
* @return date of the message where the phrase was found
|
|
|
|
*/
|
2018-09-24 16:05:43 +08:00
|
|
|
QDateTime History::getDateWhereFindPhrase(const QString& friendPk, const QDateTime& from,
|
|
|
|
QString phrase, const ParameterSearch& parameter)
|
2018-02-10 23:50:48 +08:00
|
|
|
{
|
2018-07-14 05:06:04 +08:00
|
|
|
QDateTime result;
|
|
|
|
auto rowCallback = [&result](const QVector<QVariant>& row) {
|
|
|
|
result = QDateTime::fromMSecsSinceEpoch(row[0].toLongLong());
|
2018-02-10 23:50:48 +08:00
|
|
|
};
|
|
|
|
|
2018-02-15 19:21:05 +08:00
|
|
|
phrase.replace("'", "''");
|
2018-02-14 17:30:38 +08:00
|
|
|
|
2018-06-25 02:11:20 +08:00
|
|
|
QString message;
|
|
|
|
|
|
|
|
switch (parameter.filter) {
|
|
|
|
case FilterSearch::Register:
|
2018-06-30 00:31:34 +08:00
|
|
|
message = QStringLiteral("message LIKE '%%1%'").arg(phrase);
|
2018-06-25 02:11:20 +08:00
|
|
|
break;
|
|
|
|
case FilterSearch::WordsOnly:
|
2018-09-24 16:05:43 +08:00
|
|
|
message = QStringLiteral("message REGEXP '%1'")
|
|
|
|
.arg(SearchExtraFunctions::generateFilterWordsOnly(phrase).toLower());
|
2018-06-25 02:11:20 +08:00
|
|
|
break;
|
|
|
|
case FilterSearch::RegisterAndWordsOnly:
|
2018-09-24 16:05:43 +08:00
|
|
|
message = QStringLiteral("REGEXPSENSITIVE(message, '%1')")
|
|
|
|
.arg(SearchExtraFunctions::generateFilterWordsOnly(phrase));
|
2018-06-25 02:11:20 +08:00
|
|
|
break;
|
|
|
|
case FilterSearch::Regular:
|
2018-06-30 00:31:34 +08:00
|
|
|
message = QStringLiteral("message REGEXP '%1'").arg(phrase);
|
2018-06-25 02:11:20 +08:00
|
|
|
break;
|
|
|
|
case FilterSearch::RegisterAndRegular:
|
2018-06-30 00:31:34 +08:00
|
|
|
message = QStringLiteral("REGEXPSENSITIVE(message '%1')").arg(phrase);
|
2018-06-25 02:11:20 +08:00
|
|
|
break;
|
|
|
|
default:
|
2018-06-30 00:31:34 +08:00
|
|
|
message = QStringLiteral("LOWER(message) LIKE '%%1%'").arg(phrase.toLower());
|
2018-06-25 02:11:20 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2019-05-26 03:16:47 +08:00
|
|
|
QDateTime time = from;
|
2018-12-22 06:36:38 +08:00
|
|
|
|
2019-05-26 03:16:47 +08:00
|
|
|
if (!time.isValid()) {
|
|
|
|
time = QDateTime::currentDateTime();
|
2018-12-22 06:36:38 +08:00
|
|
|
}
|
|
|
|
|
2018-08-09 02:35:28 +08:00
|
|
|
if (parameter.period == PeriodSearch::AfterDate || parameter.period == PeriodSearch::BeforeDate) {
|
2019-05-26 03:16:47 +08:00
|
|
|
time = parameter.time;
|
2018-06-25 02:11:20 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
QString period;
|
|
|
|
switch (parameter.period) {
|
|
|
|
case PeriodSearch::WithTheFirst:
|
2018-06-30 00:31:34 +08:00
|
|
|
period = QStringLiteral("ORDER BY timestamp ASC LIMIT 1;");
|
2018-06-25 02:11:20 +08:00
|
|
|
break;
|
|
|
|
case PeriodSearch::AfterDate:
|
2018-09-24 16:05:43 +08:00
|
|
|
period = QStringLiteral("AND timestamp > '%1' ORDER BY timestamp ASC LIMIT 1;")
|
2019-05-26 03:16:47 +08:00
|
|
|
.arg(time.toMSecsSinceEpoch());
|
2018-06-25 02:11:20 +08:00
|
|
|
break;
|
|
|
|
case PeriodSearch::BeforeDate:
|
2018-09-24 16:05:43 +08:00
|
|
|
period = QStringLiteral("AND timestamp < '%1' ORDER BY timestamp DESC LIMIT 1;")
|
2019-05-26 03:16:47 +08:00
|
|
|
.arg(time.toMSecsSinceEpoch());
|
2018-06-25 02:11:20 +08:00
|
|
|
break;
|
|
|
|
default:
|
2018-09-24 16:05:43 +08:00
|
|
|
period = QStringLiteral("AND timestamp < '%1' ORDER BY timestamp DESC LIMIT 1;")
|
2019-05-26 03:16:47 +08:00
|
|
|
.arg(time.toMSecsSinceEpoch());
|
2018-06-25 02:11:20 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2018-02-10 23:50:48 +08:00
|
|
|
QString queryText =
|
2018-06-30 00:31:34 +08:00
|
|
|
QStringLiteral("SELECT timestamp "
|
2018-09-24 16:05:43 +08:00
|
|
|
"FROM history "
|
|
|
|
"LEFT JOIN faux_offline_pending ON history.id = faux_offline_pending.id "
|
|
|
|
"JOIN peers chat ON chat_id = chat.id "
|
|
|
|
"WHERE chat.public_key='%1' "
|
|
|
|
"AND %2 "
|
|
|
|
"%3")
|
2018-02-12 07:02:28 +08:00
|
|
|
.arg(friendPk)
|
2018-06-25 02:11:20 +08:00
|
|
|
.arg(message)
|
|
|
|
.arg(period);
|
|
|
|
|
|
|
|
db->execNow({queryText, rowCallback});
|
|
|
|
|
2018-07-14 05:06:04 +08:00
|
|
|
return result;
|
2018-06-25 02:11:20 +08:00
|
|
|
}
|
2018-02-12 07:02:28 +08:00
|
|
|
|
2019-06-18 09:06:28 +08:00
|
|
|
/**
 * @brief Gets date boundaries in conversation with friendPk. History doesn't model conversation indexes,
 * but we can count messages between us and friendPk to effectively give us an index. This function
 * returns how many messages have happened between us <-> friendPk each time the date changes
 * @param[in] friendPk ToxPk of conversation to retrieve
 * @param[in] from Start date to look from
 * @param[in] maxNum Maximum number of date boundaries to retrieve (0 means no limit)
 * @note This API may seem a little strange, why not use QDate from and QDate to? The intent is to
 * have an API that can be used to get the first item after a date (for search) and to get a list
 * of date changes (for loadHistory). We could write two separate queries but the query is fairly
 * intricate compared to our other ones so reducing duplication of it is preferable.
 */
QList<History::DateIdx> History::getNumMessagesForFriendBeforeDateBoundaries(const ToxPk& friendPk,
                                                                             const QDate& from,
                                                                             size_t maxNum)
{
    auto friendPkString = friendPk.toString();

    // No guarantee that this is the most efficient way to do this...
    // We want to count messages that happened for a friend before a
    // certain date. We do this by re-joining our table a second time
    // but this time with the only filter being that our id is less than
    // the ID of the corresponding row in the table that is grouped by day
    auto countMessagesForFriend =
        QString("SELECT COUNT(*) - 1 " // Count - 1 corresponds to 0 indexed message id for friend
                "FROM history countHistory " // Import unfiltered table as countHistory
                "JOIN peers chat ON chat_id = chat.id " // link chat_id to chat.id
                // NOTE(review): no space between '%1' and the next fragment;
                // SQLite's tokenizer still splits '...'AND correctly, but a
                // trailing space would be clearer — confirm before "fixing".
                "WHERE chat.public_key = '%1'" // filter this conversation
                "AND countHistory.id <= history.id") // and filter that our unfiltered table history id only has elements up to history.id
            .arg(friendPkString);

    // maxNum == 0 disables the LIMIT clause entirely.
    auto limitString = (maxNum) ? QString("LIMIT %1").arg(maxNum) : QString("");

    // Outer query: one row per distinct day (timestamp is ms since epoch,
    // so /1000/60/60/24 yields days since epoch), carrying the running
    // per-conversation message index computed by the correlated subquery.
    auto queryString = QString("SELECT (%1), (timestamp / 1000 / 60 / 60 / 24) AS day "
                               "FROM history "
                               "JOIN peers chat ON chat_id = chat.id "
                               "WHERE chat.public_key = '%2' "
                               "AND timestamp >= %3 "
                               "GROUP by day "
                               "%4;")
                           .arg(countMessagesForFriend)
                           .arg(friendPkString)
                           .arg(QDateTime(from).toMSecsSinceEpoch())
                           .arg(limitString);

    QList<DateIdx> dateIdxs;
    auto rowCallback = [&dateIdxs](const QVector<QVariant>& row) {
        DateIdx dateIdx;
        dateIdx.numMessagesIn = row[0].toLongLong();
        // "day" is days since epoch; convert back to a calendar date.
        dateIdx.date =
            QDateTime::fromMSecsSinceEpoch(row[1].toLongLong() * 24 * 60 * 60 * 1000).date();
        dateIdxs.append(dateIdx);
    };

    db->execNow({queryString, rowCallback});

    return dateIdxs;
}
|
|
|
|
|
2016-07-27 06:20:54 +08:00
|
|
|
/**
|
2019-10-08 11:58:55 +08:00
|
|
|
* @brief Marks a message as delivered.
|
2016-08-01 16:21:23 +08:00
|
|
|
* Removing message from the faux-offline pending messages list.
|
|
|
|
*
|
|
|
|
* @param id Message ID.
|
|
|
|
*/
|
2019-10-08 11:58:55 +08:00
|
|
|
void History::markAsDelivered(RowId messageId)
|
2015-12-17 18:24:01 +08:00
|
|
|
{
|
2017-02-26 19:52:45 +08:00
|
|
|
if (!isValid()) {
|
2015-12-17 18:24:01 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2019-01-27 19:52:19 +08:00
|
|
|
db->execLater(QString("DELETE FROM faux_offline_pending WHERE id=%1;").arg(messageId.get()));
|
2019-06-23 18:20:48 +08:00
|
|
|
}
|