/*
 * Copyright (C) 2005-2009 Patrick Ohly
 * Copyright (C) 2009 Intel Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) version 3.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#ifndef _GNU_SOURCE
# define _GNU_SOURCE 1
#endif

// NOTE(review): every #include target below (and, throughout this file,
// template arguments in <...> and XML tags inside string literals) was
// lost when the file was mangled — restore from upstream before building.
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#ifdef ENABLE_UNIT_TESTS
#include "test.h"
#endif
#include

SE_BEGIN_CXX

// Report a failed system-call style 'action' as a source error:
// the errno text is appended, and ENOENT is mapped to the specific
// STATUS_NOT_FOUND (404) code expected e.g. from the file backend.
void SyncSourceBase::throwError(const string &action, int error)
{
    std::string what = action + ": " + strerror(error);
    // be as specific if we can be: relevant for the file backend,
    // which is expected to return STATUS_NOT_FOUND == 404 for "file
    // not found"
    if (error == ENOENT) {
        throwError(STATUS_NOT_FOUND, what);
    } else {
        throwError(what);
    }
}

// Prefix the failure text with this source's display name and
// delegate to SyncContext::throwError() (does not return).
void SyncSourceBase::throwError(const string &failure)
{
    SyncContext::throwError(string(getDisplayName()) + ": " + failure);
}

// Same as above with an explicit SyncML status code.
void SyncSourceBase::throwError(SyncMLStatus status, const string &failure)
{
    SyncContext::throwError(status, getDisplayName() + ": " + failure);
}

// Convert the currently handled exception into a status code, logged
// under this source's name; a generic fatal error is narrowed to the
// more specific "datastore failure" code.
SyncMLStatus SyncSourceBase::handleException(HandleExceptionFlags flags)
{
    SyncMLStatus res = Exception::handle(getDisplayName(), flags);
    return res == STATUS_FATAL ?
        STATUS_DATASTORE_FAILURE :
        res;
}

// Assemble the Synthesis engine's per-datastore XML configuration
// fragment from the backend-supplied SynthesisInfo.
// NOTE(review): the XML element tags inside the string literals below
// were stripped by the mangling; only the element text survived —
// the exact markup must be restored from upstream.
void SyncSourceBase::getDatastoreXML(string &xml, XMLConfigFragments &fragments)
{
    stringstream xmlstream;
    SynthesisInfo info;
    getSynthesisInfo(info, fragments);
    xmlstream << " SyncEvolution\n";
    if (info.m_earlyStartDataRead) {
        xmlstream << " yes\n";
    }
    if (info.m_readOnly) {
        xmlstream << " \n"
            " yes\n";
    }
    xmlstream << " " << (serverModeEnabled() ? "yes" : "no") << "\n"
        " yes \n"
        " yes\n";
    if (info.m_globalIDs) {
        xmlstream << " 1122583000";
    }
    xmlstream << "\n"
        " \n"
        " newer-wins\n"
        "\n"
        " \n"
        " newer-wins\n"
        "\n"
        " \n"
        " UTF-8\n"
        " \n"
        " unix\n"
        "\n"
        " \n"
        " SYSTEM\n"
        "\n"
        " \n"
        " yes\n"
        "\n";
    xmlstream << " \n"
        " \n";
    if (!info.m_profile.empty()) {
        xmlstream << " \n"
            " \n"
            " \n"
            " \n";
    }
    xmlstream << " \n"
        " \n"
        "\n";
    xmlstream << " \n"
        " \n"
        << info.m_datatypes <<
        " \n";
    // arbitrary configuration options, can override the ones above
    xmlstream << info.m_datastoreOptions;
    xml = xmlstream.str();
}

// Ask the backend (via getSynthesisInfo()) for the name of the
// Synthesis datatype it stores natively.
string SyncSourceBase::getNativeDatatypeName()
{
    SynthesisInfo info;
    XMLConfigFragments fragments;
    getSynthesisInfo(info, fragments);
    return info.m_native;
}

// Base constructor: binds the configuration nodes and caches the
// user-visible display name.
SyncSource::SyncSource(const SyncSourceParams &params) :
    SyncSourceConfig(params.m_name, params.m_nodes),
    m_numDeleted(0),
    m_forceSlowSync(false),
    m_database("", ""),
    m_name(params.getDisplayName())
{
}

// Return the innermost (most recently pushed) Synthesis API handle,
// or NULL when no engine call is active.
SDKInterface *SyncSource::getSynthesisAPI() const
{
    return m_synthesisAPI.empty() ?
        NULL :
        // NOTE(review): static_cast template argument lost in mangling
        // (presumably static_cast<SDKInterface *> — TODO confirm upstream)
        static_cast(m_synthesisAPI[m_synthesisAPI.size() - 1]);
}

// Push an engine handle for the duration of a Synthesis callback.
void SyncSource::pushSynthesisAPI(sysync::SDK_InterfaceType *synthesisAPI)
{
    m_synthesisAPI.push_back(synthesisAPI);
}

// Pop the handle pushed by the matching pushSynthesisAPI().
void SyncSource::popSynthesisAPI()
{
    m_synthesisAPI.pop_back();
}

// Process-wide registry of available backends (function-local static
// avoids the static initialization order problem).
SourceRegistry &SyncSource::getSourceRegistry()
{
    static SourceRegistry sourceRegistry;
    return sourceRegistry;
}

// Self-registration hook: each backend creates one static instance of
// this class, which adds itself to the global registry.
RegisterSyncSource::RegisterSyncSource(const string &shortDescr,
                                       bool enabled,
                                       Create_t create,
                                       const string &typeDescr,
                                       const Values &typeValues) :
    m_shortDescr(shortDescr),
    m_enabled(enabled),
    m_create(create),
    m_typeDescr(typeDescr),
    m_typeValues(typeValues)
{
    SourceRegistry &registry(SyncSource::getSourceRegistry());
    // insert sorted by description to have deterministic ordering
    for(SourceRegistry::iterator it = registry.begin();
        it != registry.end();
        ++it) {
        if ((*it)->m_shortDescr > shortDescr) {
            registry.insert(it, this);
            return;
        }
    }
    registry.push_back(this);
}

// Placeholder source used when a backend is known but was compiled out:
// identifies itself as inactive and fails all real operations.
class InactiveSyncSource : public SyncSource
{
public:
    InactiveSyncSource(const SyncSourceParams &params) : SyncSource(params) {}
    virtual bool isInactive() const { return true; }
    virtual void enableServerMode() {}
    virtual bool serverModeEnabled() const { return false; }
    virtual void getSynthesisInfo(SyncEvo::SyncSourceBase::SynthesisInfo&,
                                  SyncEvo::XMLConfigFragments&) { throwError("inactive"); }
    virtual Databases getDatabases() { throwError("inactive"); return Databases(); }
    virtual void open() { throwError("inactive"); }
    virtual void close() { throwError("inactive"); }
    virtual std::string getPeerMimeType() const { return ""; }
};

// Factory for the inactive placeholder; caller owns the result.
SyncSource *RegisterSyncSource::InactiveSource(const SyncSourceParams &params)
{
    return new InactiveSyncSource(params);
}

// Process-wide registry of client-test configurations.
TestRegistry &SyncSource::getTestRegistry()
{
    static TestRegistry testRegistry;
    return testRegistry;
}

// Self-registration hook for backend test configurations.
RegisterSyncSourceTest::RegisterSyncSourceTest(const string &configName,
                                               const string &testCaseName) :
    m_configName(configName),
    m_testCaseName(testCaseName)
{
    SyncSource::getTestRegistry().push_back(this);
}

// Scans the backend library directory at program startup (static
// instance below) and dlopen()s each module so it can register itself.
// NOTE(review): large parts of this constructor (loop structure, stream
// output, template arguments, the #endif and member declarations) were
// eaten by the mangling; the surviving tokens are kept verbatim and the
// original must be restored from upstream.
static class ScannedModules {
public:
    ScannedModules() {
#ifdef ENABLE_MODULES
        list > > dirs;
        /* If enviroment variable SYNCEVOLUTION_BACKEND_DIR is set, will search backends from this path instead. */
        string backend_dir (SYNCEVO_BACKEND);
        char *backend_env = getenv("SYNCEVOLUTION_BACKEND_DIR");
        if (backend_env && strlen(backend_env)){
            backend_dir = backend_env;
        }
        boost::shared_ptr dir (new ReadDir (backend_dir, false));
        string dirpath (backend_dir);
        // scan directories for matching module names
        do {
            debug<<"Scanning backend libraries in " < subdir (new ReadDir (path, false));
            dirs.push_back (make_pair(path, subdir));
        }
        continue;
    }
    if (entry.rfind(".so") == entry.length()-3){
        // Open the shared object so that backend can register
        // itself. We keep that pointer, so never close the
        // module!
        string fullpath = dirpath + '/' + entry;
        fullpath = normalizePath(fullpath);
        // RTLD_LAZY is needed for the WebDAV backend, which
        // needs to do an explicit dlopen() of libneon in compatibility
        // mode before any of the neon functions can be resolved.
        dlhandle = dlopen(fullpath.c_str(), RTLD_LAZY|RTLD_GLOBAL);
        // remember which modules were found and which were not
        if (dlhandle) {
            debug<<"Loading backend library "< m_available;
    std::ostringstream debug, info;
} scannedModules;

// Human-readable summary of loaded backend modules.
string SyncSource::backendsInfo() {
    return scannedModules.info.str();
}

// Detailed scan/load log, for debug output.
string SyncSource::backendsDebug() {
    return scannedModules.debug.str();
}

void SyncSource::requestAnotherSync()
{
    // At the moment the per-source request to restart cannot be
    // stored; instead only a per-session request is set. That's okay
    // for now because restarting is limited to sessions with only
    // one source active (intentional simplification).
    SE_LOG_DEBUG(getDisplayName(), "requesting another sync");
    SyncContext::requestAnotherSync();
}

// Instantiate the backend selected by the source's configured type.
// "virtual" is handled specially; otherwise each registered backend's
// factory is offered the parameters, and exactly one must accept.
// Returns NULL (or throws, if error=true) when nothing matches.
// NOTE(review): the auto_ptr/shared_ptr template arguments in this
// function were lost in the mangling — restore before building.
SyncSource *SyncSource::createSource(const SyncSourceParams &params, bool error, SyncConfig *config)
{
    SourceType sourceType = getSourceType(params.m_nodes);
    if (sourceType.m_backend == "virtual") {
        SyncSource *source = NULL;
        source = new VirtualSyncSource(params, config);
        if (error && !source) {
            SyncContext::throwError(params.getDisplayName() + ": virtual source cannot be instantiated");
        }
        return source;
    }
    const SourceRegistry &registry(getSourceRegistry());
    auto_ptr source;
    BOOST_FOREACH(const RegisterSyncSource *sourceInfos, registry) {
        auto_ptr nextSource(sourceInfos->m_create(params));
        if (nextSource.get()) {
            if (source.get()) {
                // two backends claimed the same type alias
                SyncContext::throwError(params.getDisplayName() + ": backend " +
                                        sourceType.m_backend +
                                        " is ambiguous, avoid the alias and pick a specific backend instead directly");
            }
            source = nextSource;
        }
    }
    if (source.get()) {
        return source.release();
    }
    if (error) {
        string backends;
        if (scannedModules.m_available.size()) {
            backends += "by any of the backend modules (";
            backends += boost::join(scannedModules.m_available, ", ");
            backends += ") ";
        }
        string problem =
            StringPrintf("%s%sbackend not supported %sor not correctly configured (backend=%s databaseFormat=%s syncFormat=%s)",
                         params.m_name.c_str(),
                         params.m_name.empty() ? "" : ": ",
                         backends.c_str(),
                         sourceType.m_backend.c_str(),
                         sourceType.m_localFormat.c_str(),
                         sourceType.m_format.c_str());
        SyncContext::throwError(SyncMLStatus(sysync::LOCERR_CFGPARSE), problem);
    }
    return NULL;
}

// Build a source for the client-test harness, using the
// target-config@client-test[-<server>] context and an optional
// per-test database ID prefix.
SyncSource *SyncSource::createTestingSource(const string &name, const string &type, bool error,
                                            const char *prefix)
{
    std::string config = "target-config@client-test";
    const char *server = getenv("CLIENT_TEST_SERVER");
    if (server) {
        config += "-";
        config += server;
    }
    boost::shared_ptr context(new SyncConfig(config));
    SyncSourceNodes nodes = context->getSyncSourceNodes(name);
    SyncSourceParams params(name, nodes, context);
    PersistentSyncSourceConfig sourceconfig(name, nodes);
    sourceconfig.setSourceType(type);
    if (prefix) {
        sourceconfig.setDatabaseID(string(prefix) + name + "_1");
    }
    return createSource(params, error);
}

// A "virtual" source combines several underlying sources (currently
// exactly two, e.g. calendar+todo) configured via its database ID.
VirtualSyncSource::VirtualSyncSource(const SyncSourceParams &params, SyncConfig *config) :
    DummySyncSource(params)
{
    if (config) {
        std::string evoSyncSource = getDatabaseID();
        BOOST_FOREACH(std::string name, getMappedSources()) {
            if (name.empty()) {
                throwError(StringPrintf("configuration of underlying sources contains empty source name: database = '%s'",
                                        evoSyncSource.c_str()));
            }
            SyncSourceNodes source = config->getSyncSourceNodes(name);
            // shared_ptr with a no-op deleter: config is owned elsewhere
            SyncSourceParams params(name, source, boost::shared_ptr(config, SyncConfigNOP()));
            boost::shared_ptr syncSource(createSource(params, true, config));
            m_sources.push_back(syncSource);
        }
        if (m_sources.size() != 2) {
            throwError(StringPrintf("configuration of underlying sources must contain exactly one calendar and one todo source (like calendar+todo): database = '%s'",
                                    evoSyncSource.c_str()));
        }
    }
}

// Open all underlying sources.
void VirtualSyncSource::open()
{
    getDataTypeSupport();
    BOOST_FOREACH(boost::shared_ptr &source, m_sources) {
        source->open();
    }
}

// Close all underlying sources.
void VirtualSyncSource::close()
{
    BOOST_FOREACH(boost::shared_ptr &source, m_sources) {
        source->close();
    }
}

// Split the database ID (an escaped, comma-joined list) into the
// names of the underlying sources.
std::vector VirtualSyncSource::getMappedSources()
{
    std::string evoSyncSource = getDatabaseID();
    std::vector mappedSources = unescapeJoinedString (evoSyncSource, ',');
    return mappedSources;
}

// Datatype XML for the combined format configured for this source.
std::string VirtualSyncSource::getDataTypeSupport()
{
    string datatypes;
    SourceType sourceType = getSourceType();
    string type = sourceType.m_format;
    datatypes = getDataTypeSupport(type, sourceType.m_forceFormat);
    return datatypes;
}

// Report the single combined "calendar+todo" database, but only when
// every underlying source has at least one database itself.
SyncSource::Databases VirtualSyncSource::getDatabases()
{
    SyncSource::Databases dbs;
    BOOST_FOREACH (boost::shared_ptr &source, m_sources) {
        SyncSource::Databases sub = source->getDatabases();
        if (sub.empty()) {
            return dbs;
        }
    }
    Database db ("calendar+todo", "");
    dbs.push_back (db);
    return dbs;
}

// Wire the session begin/end callbacks into the Synthesis operation
// table; data read end / write start are constant no-ops.
void SyncSourceSession::init(SyncSource::Operations &ops)
{
    ops.m_startDataRead = boost::bind(&SyncSourceSession::startDataRead, this, _1, _2);
    ops.m_endDataRead = boost::lambda::constant(sysync::LOCERR_OK);
    ops.m_startDataWrite = boost::lambda::constant(sysync::LOCERR_OK);
    ops.m_endDataWrite = boost::bind(&SyncSourceSession::endDataWrite, this, _1, _2);
}

// Engine callback: start reading; NULL tokens become empty strings.
sysync::TSyError SyncSourceSession::startDataRead(const char *lastToken, const char *resumeToken)
{
    beginSync(lastToken ? lastToken : "",
              resumeToken ?
              resumeToken : "");
    return sysync::LOCERR_OK;
}

// Engine callback: end of writing; hand the new sync token back to the
// engine in engine-owned memory (StrAlloc).
sysync::TSyError SyncSourceSession::endDataWrite(bool success, char **newToken)
{
    std::string token = endSync(success);
    *newToken = StrAlloc(token.c_str());
    return sysync::LOCERR_OK;
}

// Hook the change iteration into the Synthesis operation table.
void SyncSourceChanges::init(SyncSource::Operations &ops)
{
    ops.m_readNextItem = boost::bind(&SyncSourceChanges::iterate, this, _1, _2, _3);
}

SyncSourceChanges::SyncSourceChanges() :
    m_first(true)
{
}

// Record a LUID in the given state set; false if already present.
// NOTE(review): the template arguments of 'pair' were lost in mangling.
bool SyncSourceChanges::addItem(const string &luid, State state)
{
    pair res = m_items[state].insert(luid);
    return res.second;
}

// Drop all recorded items; returns true if anything was removed.
bool SyncSourceChanges::reset()
{
    bool removed = false;
    for (int i = 0; i < MAX; i++) {
        if (!m_items[i].empty()) {
            m_items[i].clear();
            removed = true;
        }
    }
    m_first = true;
    return removed;
}

// Engine callback: enumerate all items, reporting each as changed or
// unchanged; EOF once the ANY set is exhausted.
sysync::TSyError SyncSourceChanges::iterate(sysync::ItemID aID,
                                            sysync::sInt32 *aStatus,
                                            bool aFirst)
{
    aID->item = NULL;
    aID->parent = NULL;
    if (m_first || aFirst) {
        m_it = m_items[ANY].begin();
        m_first = false;
    }
    if (m_it == m_items[ANY].end()) {
        *aStatus = sysync::ReadNextItem_EOF;
    } else {
        const string &luid = *m_it;
        if (m_items[NEW].find(luid) != m_items[NEW].end() ||
            m_items[UPDATED].find(luid) != m_items[UPDATED].end()) {
            *aStatus = sysync::ReadNextItem_Changed;
        } else {
            *aStatus = sysync::ReadNextItem_Unchanged;
        }
        aID->item = StrAlloc(luid.c_str());
        ++m_it;
    }
    return sysync::LOCERR_OK;
}

// Hook item deletion into the Synthesis operation table.
void SyncSourceDelete::init(SyncSource::Operations &ops)
{
    ops.m_deleteItem = boost::bind(&SyncSourceDelete::deleteItemSynthesis, this, _1);
}

// Engine callback: delete one item and count it for statistics.
sysync::TSyError SyncSourceDelete::deleteItemSynthesis(sysync::cItemID aID)
{
    deleteItem(aID->item);
    incrementNumDeleted();
    return sysync::LOCERR_OK;
}

// Derive the Synthesis configuration (field list, profile, datatypes,
// scripts) from the backend's default MIME type.
// NOTE(review): the XML tags inside the m_datatypes/m_datastoreOptions
// literals were stripped by the mangling.
void SyncSourceSerialize::getSynthesisInfo(SynthesisInfo &info,
                                           XMLConfigFragments &fragments)
{
    string type = getMimeType();

    // default remote rule (local-storage.xml): suppresses empty properties
    info.m_backendRule = "LOCALSTORAGE";

    // We store entire items locally and thus have to make sure that
    // they are complete by having the engine merge incoming and local
    // data.
    info.m_datastoreOptions += " true\n";

    if (type == "text/x-vcard") {
        info.m_native = "vCard21";
        info.m_fieldlist = "contacts";
        info.m_profile = "\"vCard\", 1";
        info.m_datatypes =
            " \n"
            " \n";
    } else if (type == "text/vcard") {
        info.m_native = "vCard30";
        info.m_fieldlist = "contacts";
        info.m_profile = "\"vCard\", 2";
        info.m_datatypes =
            " \n"
            " \n";
        // If a backend overwrites the m_beforeWriteScript, then it must
        // include $VCARD_OUTGOING_PHOTO_VALUE_SCRIPT in its own script,
        // otherwise it will be sent invalid, empty PHOTO;TYPE=unknown;VALUE=binary:
        // properties.
        info.m_beforeWriteScript = "$VCARD_OUTGOING_PHOTO_VALUE_SCRIPT;\n";
        // Likewise for reading. This is needed to ensure proper merging
        // of contact data.
        info.m_afterReadScript = "$VCARD_INCOMING_PHOTO_VALUE_SCRIPT;\n";
    } else if (type == "text/x-calendar" || type == "text/x-vcalendar") {
        info.m_native = "vCalendar10";
        info.m_fieldlist = "calendar";
        info.m_profile = "\"vCalendar\", 1";
        info.m_datatypes =
            " \n"
            " \n";
        /**
         * here are two default implementations. If user wants to reset it,
         * just implement its own getSynthesisInfo. If user wants to use this default
         * implementations and its new scripts, it is possible to append its implementations
         * to info.m_afterReadScript and info.m_beforeWriteScript.
         */
        info.m_afterReadScript = "$VCALENDAR10_AFTERREAD_SCRIPT;\n";
        info.m_beforeWriteScript = "$VCALENDAR10_BEFOREWRITE_SCRIPT;\n"
            "$CALENDAR_BEFOREWRITE_SCRIPT;\n";
    } else if (type == "text/calendar" ||
               boost::starts_with(type, "text/calendar+")) {
        info.m_native = "iCalendar20";
        info.m_fieldlist = "calendar";
        info.m_profile = "\"vCalendar\", 2";
        info.m_datatypes =
            " \n"
            " \n";
        info.m_beforeWriteScript = "$CALENDAR_BEFOREWRITE_SCRIPT;\n";
    } else if (type == "text/plain") {
        info.m_fieldlist = "Note";
        info.m_profile = "\"Note\", 2";
    } else {
        throwError(string("default MIME type not supported: ") + type);
    }

    // the configured format overrides the default type
    SourceType sourceType = getSourceType();
    if (!sourceType.m_format.empty()) {
        type = sourceType.m_format;
    }
    info.m_datatypes = getDataTypeSupport(type, sourceType.m_forceFormat);
}

// Map a (possibly versioned) MIME type to the datatype XML accepted by
// the engine; without forceFormat the legacy variant is also allowed.
// NOTE(review): the XML tags inside the literals were stripped by the
// mangling.
std::string SyncSourceBase::getDataTypeSupport(const std::string &type,
                                               bool forceFormat)
{
    std::string datatypes;

    if (type == "text/x-vcard:2.1" || type == "text/x-vcard") {
        datatypes = " \n";
        if (!forceFormat) {
            datatypes += " \n";
        }
    } else if (type == "text/vcard:3.0" || type == "text/vcard") {
        datatypes = " \n";
        if (!forceFormat) {
            datatypes += " \n";
        }
    } else if (type == "text/x-vcalendar:1.0" || type == "text/x-vcalendar"
               || type == "text/x-calendar:1.0" || type == "text/x-calendar") {
        datatypes = " \n";
        if (!forceFormat) {
            datatypes += " \n";
        }
    } else if (type == "text/calendar:2.0" || type == "text/calendar") {
        datatypes = " \n";
        if (!forceFormat) {
            datatypes += " \n";
        }
    } else if (type == "text/calendar+plain") {
        datatypes =
            " \n"
            " \n"
            " \n";
    } else if (type == "text/plain:1.0" || type == "text/plain") {
        // note10 are the same as note11, so ignore force format
        datatypes =
            " \n"
            " \n";
    } else if (type.empty()) {
        throwError("no MIME type configured");
    } else {
        throwError(string("configured MIME type not supported: ") + type);
    }

    return datatypes;
}

// Engine callback: read one item and store its serialized data in the
// engine-provided key.
sysync::TSyError SyncSourceSerialize::readItemAsKey(sysync::cItemID aID, sysync::KeyH aItemKey)
{
    std::string item;
    readItem(aID->item,
             item);
    TSyError res = getSynthesisAPI()->setValue(aItemKey, "data", item.c_str(), item.size());
    return res;
}

// Engine callback used for both insert (aID == NULL) and update:
// extract the serialized data from the key, store it, and translate
// the backend's merge/replace outcome into the matching engine code.
sysync::TSyError SyncSourceSerialize::insertItemAsKey(sysync::KeyH aItemKey, sysync::cItemID aID, sysync::ItemID newID)
{
    SharedBuffer data;
    TSyError res = getSynthesisAPI()->getValue(aItemKey, "data", data);

    if (!res) {
        InsertItemResult inserted = insertItem(!aID ? "" : aID->item, data.get());
        newID->item = StrAlloc(inserted.m_luid.c_str());
        switch (inserted.m_state) {
        case ITEM_OKAY:
            break;
        case ITEM_REPLACED:
            res = sysync::DB_DataReplaced;
            break;
        case ITEM_MERGED:
            res = sysync::DB_DataMerged;
            break;
        case ITEM_NEEDS_MERGE:
            res = sysync::DB_Conflict;
            break;
        }
    }

    return res;
}

// Hook the key-based read/insert/update callbacks into the operation
// table; insert reuses insertItemAsKey() with a NULL item ID.
void SyncSourceSerialize::init(SyncSource::Operations &ops)
{
    ops.m_readItemAsKey = boost::bind(&SyncSourceSerialize::readItemAsKey,
                                      this, _1, _2);
    ops.m_insertItemAsKey = boost::bind(&SyncSourceSerialize::insertItemAsKey,
                                        this, _1, (sysync::cItemID)NULL, _2);
    ops.m_updateItemAsKey = boost::bind(&SyncSourceSerialize::insertItemAsKey,
                                        this, _1, _2, _3);
}

// Prepare for writing a new backup: remember the old backup's
// content hashes so identical items can be reused via hard links.
void ItemCache::init(const SyncSource::Operations::ConstBackupInfo &oldBackup,
                     const SyncSource::Operations::BackupInfo &newBackup,
                     bool legacy)
{
    m_counter = 1;
    m_legacy = legacy;
    m_backup = newBackup;
    m_hash2counter.clear();
    m_dirname = oldBackup.m_dirname;
    if (m_dirname.empty() || !oldBackup.m_node) {
        return;
    }
    long numitems;
    if (!oldBackup.m_node->getProperty("numitems", numitems)) {
        return;
    }
    for (long counter = 1; counter <= numitems; counter++) {
        stringstream key;
        key << counter << m_hashSuffix;
        Hash_t hash;
        if (oldBackup.m_node->getProperty(key.str(), hash)) {
            m_hash2counter[hash] = counter;
        }
    }
}

void ItemCache::reset()
{
    // clean directory and start counting at 1 again
    m_counter = 1;
    rm_r(m_backup.m_dirname);
    mkdir_p(m_backup.m_dirname);
    m_backup.m_node->clear();
}

// Look up a previously backed-up file by content hash; empty string
// when there is no match.
string ItemCache::getFilename(Hash_t hash)
{
    Map_t::const_iterator it = m_hash2counter.find(hash);
    if (it != m_hash2counter.end()) {
        stringstream dirname;
        dirname << m_dirname << "/" << it->second;
        return dirname.str();
    } else {
        return "";
    }
}

// Suffix of the per-item hash property, depending on the configured
// hash function.
const char *ItemCache::m_hashSuffix =
#ifdef USE_SHA256
    "-sha256"
#else
    "-hash"
#endif
    ;

// Store one item in the new backup: hard-link to an identical old file
// when possible, otherwise write it out, then record uid/rev/hash
// properties under the running counter.
void ItemCache::backupItem(const std::string &item,
                           const std::string &uid,
                           const std::string &rev)
{
    stringstream filename;
    filename << m_backup.m_dirname << "/" << m_counter;

    ItemCache::Hash_t hash = hashFunc(item);
    string oldfilename = getFilename(hash);
    if (!oldfilename.empty()) {
        // found old file with same content, reuse it via hardlink
        if (link(oldfilename.c_str(), filename.str().c_str())) {
            // Hard linking failed. Record this, then continue
            // by ignoring the old file.
            SE_LOG_DEBUG(NULL, "hard linking old %s new %s: %s",
                         oldfilename.c_str(),
                         filename.str().c_str(),
                         strerror(errno));
            oldfilename.clear();
        }
    }

    if (oldfilename.empty()) {
        // write new file instead of reusing old one
        ofstream out(filename.str().c_str());
        out.write(item.c_str(), item.size());
        out.close();
        if (out.fail()) {
            SE_THROW(string("error writing ") + filename.str() + ": " + strerror(errno));
        }
    }

    stringstream key;
    key << m_counter << "-uid";
    m_backup.m_node->setProperty(key.str(), uid);
    if (m_legacy) {
        // clear() does not remove the existing content, which was
        // intended here. This should have been key.str(""). As a
        // result, keys for -rev are longer than intended because they
        // start with the -uid part. We cannot change it now, because
        // that would break compatibility with nodes that use the
        // older, longer keys for -rev.
        // key.clear();
    } else {
        key.str("");
    }
    key << m_counter << "-rev";
    m_backup.m_node->setProperty(key.str(), rev);
    key.str("");
    key << m_counter << ItemCache::m_hashSuffix;
    m_backup.m_node->setProperty(key.str(), hash);
    m_counter++;
}

// Write the final item count into the backup node and the report.
void ItemCache::finalize(BackupReport &report)
{
    stringstream value;
    value << m_counter - 1;
    m_backup.m_node->setProperty("numitems", value.str());
    m_backup.m_node->flush();
    report.setNumItems(m_counter - 1);
}

// Fill m_revisions from the backend once per cycle.
void SyncSourceRevisions::initRevisions()
{
    if (!m_revisionsSet) {
        // might still be filled with garbage from previous run
        m_revisions.clear();
        listAllItems(m_revisions);
        m_revisionsSet = true;
    }
}

// Back up every item (raw form) together with its revision string,
// reusing the cached revision list at the start of a sync.
void SyncSourceRevisions::backupData(const SyncSource::Operations::ConstBackupInfo &oldBackup,
                                     const SyncSource::Operations::BackupInfo &newBackup,
                                     BackupReport &report)
{
    ItemCache cache;
    cache.init(oldBackup, newBackup, true);

    bool startOfSync = newBackup.m_mode == SyncSource::Operations::BackupInfo::BACKUP_BEFORE;
    RevisionMap_t buffer;
    RevisionMap_t *revisions;
    if (startOfSync) {
        initRevisions();
        revisions = &m_revisions;
    } else {
        listAllItems(buffer);
        revisions = &buffer;
    }

    string item;
    errno = 0;
    BOOST_FOREACH(const StringPair &mapping, *revisions) {
        const string &uid = mapping.first;
        const string &rev = mapping.second;
        m_raw->readItemRaw(uid, item);
        cache.backupItem(item, uid, rev);
    }
    cache.finalize(report);
}

// Restore the database from a backup: add/update items that differ,
// leave identical revisions alone, delete items not in the backup.
void SyncSourceRevisions::restoreData(const SyncSource::Operations::ConstBackupInfo &oldBackup,
                                      bool dryrun,
                                      SyncSourceReport &report)
{
    RevisionMap_t revisions;
    listAllItems(revisions);

    long numitems;
    string strval;
    strval = oldBackup.m_node->readProperty("numitems");
    stringstream stream(strval);
    stream >> numitems;

    for (long counter = 1; counter <= numitems; counter++) {
        stringstream key;
        key << counter << "-uid";
        string uid = oldBackup.m_node->readProperty(key.str());
        // NOTE(review): key.clear() only resets stream flags, so the
        // "-rev" key deliberately keeps the "-uid" prefix (legacy
        // format, see ItemCache::backupItem above).
        key.clear();
        key << counter << "-rev";
        string rev = oldBackup.m_node->readProperty(key.str());
        RevisionMap_t::iterator it = revisions.find(uid);
        report.incrementItemStat(report.ITEM_LOCAL,
                                 report.ITEM_ANY,
                                 report.ITEM_TOTAL);
        if (it != revisions.end() &&
            it->second == rev) {
            // item exists in backup and database with same revision:
            // nothing to do
        } else {
            // add or update, so need item
            stringstream filename;
            filename << oldBackup.m_dirname << "/" << counter;
            string data;
            if (!ReadFile(filename.str(), data)) {
                throwError(StringPrintf("restoring %s from %s failed: could not read file",
                                        uid.c_str(),
                                        filename.str().c_str()));
            }
            // TODO: it would be nicer to recreate the item
            // with the original revision. If multiple peers
            // synchronize against us, then some of them
            // might still be in sync with that revision. By
            // updating the revision here we force them to
            // needlessly receive an update.
            //
            // For the current peer for which we restore this is
            // avoided by the revision check above: unchanged
            // items aren't touched.
            SyncSourceReport::ItemState state =
                it == revisions.end() ?
                SyncSourceReport::ITEM_ADDED :   // not found in database, create anew
                SyncSourceReport::ITEM_UPDATED;  // found, update existing item
            try {
                report.incrementItemStat(report.ITEM_LOCAL,
                                         state,
                                         report.ITEM_TOTAL);
                if (!dryrun) {
                    m_raw->insertItemRaw(it == revisions.end() ? "" : uid,
                                         data);
                }
            } catch (...) {
                report.incrementItemStat(report.ITEM_LOCAL,
                                         state,
                                         report.ITEM_REJECT);
                throw;
            }
        }

        // remove handled item from revision list so
        // that when we are done, the only remaining
        // items listed there are the ones which did
        // no exist in the backup
        if (it != revisions.end()) {
            revisions.erase(it);
        }
    }

    // now remove items that were not in the backup
    BOOST_FOREACH(const StringPair &mapping, revisions) {
        try {
            report.incrementItemStat(report.ITEM_LOCAL,
                                     report.ITEM_REMOVED,
                                     report.ITEM_TOTAL);
            if (!dryrun) {
                m_del->deleteItem(mapping.first);
            }
        } catch(...) {
            report.incrementItemStat(report.ITEM_LOCAL,
                                     report.ITEM_REMOVED,
                                     report.ITEM_REJECT);
            throw;
        }
    }
}

// Compare the backend's current uid->revision map against the tracking
// node to classify items as NEW/UPDATED/DELETED. Returns true when
// missing revision information forces a slow sync.
bool SyncSourceRevisions::detectChanges(ConfigNode &trackingNode, ChangeMode mode)
{
    bool forceSlowSync = false;

    // erase content which might have been set in a previous call
    reset();
    if (!m_firstCycle) {
        // detectChanges() must have been called before;
        // don't trust our cached revisions in that case (not updated during sync!)
        // TODO: keep the revision map up-to-date as part of a sync and reuse it
        m_revisionsSet = false;
    } else {
        m_firstCycle = false;
    }

    if (mode == CHANGES_NONE) {
        // shortcut because nothing changed: just copy our known item list
        ConfigProps props;
        trackingNode.readProperties(props);
        RevisionMap_t revisions;
        BOOST_FOREACH(const StringPair &mapping, props) {
            const string &uid = mapping.first;
            const string &revision = mapping.second;
            addItem(uid);
            revisions[uid] = revision;
        }
        setAllItems(revisions);
        return false;
    }

    if (!m_revisionsSet &&
        mode == CHANGES_FULL) {
        ConfigProps props;
        trackingNode.readProperties(props);
        if (!props.empty()) {
            // We were not asked to throw away all old information and
            // there is some that may be worth salvaging, so let's give
            // our derived class a chance to update it instead of having
            // to reread everything.
            //
            // The exact number of items at which the update method is
            // more efficient depends on the derived class; here we assume
            // that even a single item makes it worthwhile. The derived
            // class can always ignore the information if it has different
            // tradeoffs.
            //
            // TODO (?): an API which only provides the information
            // on demand...
            m_revisions.clear();
            m_revisions.insert(props.begin(), props.end());
            updateAllItems(m_revisions);
            // continue with m_revisions initialized below
            m_revisionsSet = true;
        }
    }

    // traditional, slow fallback follows...
    initRevisions();

    // Check whether we have valid revision information. If not, then
    // we need to do a slow sync. The assumption here is
    // [rest of this comment was lost in the file mangling]
    if (!m_revisions.empty() &&
        m_revisions.begin()->second.empty()) {
        forceSlowSync = true;
        mode = CHANGES_SLOW;
    }

    // Delay setProperty calls until after checking all uids.
    // Necessary for MapSyncSource, which shares the revision among
    // several uids. Another advantage is that we can do the "find
    // deleted items" check with less entries (new items not added
    // yet).
    StringMap revUpdates;

    if (mode == CHANGES_SLOW) {
        // Make tracking node identical to current set of items
        // by re-adding them below.
        trackingNode.clear();
    }

    BOOST_FOREACH(const StringPair &mapping, m_revisions) {
        const string &uid = mapping.first;
        const string &revision = mapping.second;

        // always remember the item, need full list
        addItem(uid);

        // avoid unnecessary work in CHANGES_SLOW mode
        if (mode == CHANGES_SLOW) {
            trackingNode.setProperty(uid, revision);
        } else {
            // detect changes
            string serverRevision(trackingNode.readProperty(uid));
            if (!serverRevision.size()) {
                addItem(uid, NEW);
                revUpdates[uid] = revision;
            } else {
                if (revision != serverRevision) {
                    addItem(uid, UPDATED);
                    revUpdates[uid] = revision;
                }
            }
        }
    }

    if (mode != CHANGES_SLOW) {
        // clear information about all items that we recognized as deleted
        ConfigProps props;
        trackingNode.readProperties(props);
        BOOST_FOREACH(const StringPair &mapping, props) {
            const string &uid(mapping.first);
            if (getAllItems().find(uid) == getAllItems().end()) {
                addItem(uid, DELETED);
                trackingNode.removeProperty(uid);
            }
        }
        // now update tracking node
        BOOST_FOREACH(const StringPair &update, revUpdates) {
            trackingNode.setProperty(update.first, update.second);
        }
    }

    return forceSlowSync;
}

// Record that an item's LUID and/or revision changed during the sync.
void SyncSourceRevisions::updateRevision(ConfigNode &trackingNode,
                                         const std::string &old_luid,
                                         const std::string &new_luid,
                                         const std::string &revision)
{
    databaseModified();
    if (old_luid != new_luid) {
        trackingNode.removeProperty(old_luid);
    }
    if (new_luid.empty() || revision.empty()) {
        throwError("need non-empty LUID and revision string");
    }
    trackingNode.setProperty(new_luid, revision);
}

// Remove an item's tracking entry after it was deleted.
void SyncSourceRevisions::deleteRevision(ConfigNode &trackingNode,
                                         const std::string &luid)
{
    databaseModified();
    trackingNode.removeProperty(luid);
}

// Sleep until the revision strings of any items modified during this
// sync are guaranteed to differ from future modifications.
void SyncSourceRevisions::sleepSinceModification()
{
    Timespec current = Timespec::monotonic();
    // Don't let this get interrupted by user abort.
    // It is needed for correct change tracking.
    while ((current - m_modTimeStamp).duration() < m_revisionAccuracySeconds) {
        Sleep(m_revisionAccuracySeconds - (current - m_modTimeStamp).duration());
        current = Timespec::monotonic();
    }
}

// Remember when the database was last touched (see sleep above).
void SyncSourceRevisions::databaseModified()
{
    m_modTimeStamp = Timespec::monotonic();
}

// Register backup/restore callbacks (when raw access is available) and
// the post-write sleep that makes revision granularity safe.
void SyncSourceRevisions::init(SyncSourceRaw *raw,
                               SyncSourceDelete *del,
                               int granularity,
                               SyncSource::Operations &ops)
{
    m_raw = raw;
    m_del = del;
    m_revisionAccuracySeconds = granularity;
    m_revisionsSet = false;
    m_firstCycle = false;
    if (raw) {
        ops.m_backupData = boost::bind(&SyncSourceRevisions::backupData,
                                       this, _1, _2, _3);
    }
    if (raw && del) {
        ops.m_restoreData = boost::bind(&SyncSourceRevisions::restoreData,
                                        this, _1, _2, _3);
    }
    ops.m_endDataWrite.getPostSignal().connect(boost::bind(&SyncSourceRevisions::sleepSinceModification,
                                                           this));
}

// Build a one-line item description by joining the configured fields
// read from the Synthesis item key.
// NOTE(review): the std::list template argument was lost in mangling.
std::string SyncSourceLogging::getDescription(sysync::KeyH aItemKey)
{
    try {
        std::list values;
        BOOST_FOREACH(const std::string &field, m_fields) {
            SharedBuffer value;
            if (!getSynthesisAPI()->getValue(aItemKey, field, value) &&
                value.size()) {
                values.push_back(std::string(value.get()));
            }
        }
        std::string description = boost::join(values, m_sep);
        return description;
    } catch (...) {
        // Instead of failing we log the error and ask
        // the caller to log the UID. That way transient
        // errors or errors in the logging code don't
        // prevent syncs.
        handleException();
        return "";
    }
}

// LUID-based variant: no description available by default.
std::string SyncSourceLogging::getDescription(const string &luid)
{
    return "";
}

// Pre-operation logging hook: announce an item insert.
void SyncSourceLogging::insertItemAsKey(sysync::KeyH aItemKey, sysync::ItemID newID)
{
    std::string description = getDescription(aItemKey);
    SE_LOG_INFO(getDisplayName(),
                description.empty() ? "%s <%s>" : "%s \"%s\"",
                "adding",
                !description.empty() ? description.c_str() : "???");
}

// Pre-operation logging hook: announce an item update.
void SyncSourceLogging::updateItemAsKey(sysync::KeyH aItemKey, sysync::cItemID aID, sysync::ItemID newID)
{
    std::string description = getDescription(aItemKey);
    SE_LOG_INFO(getDisplayName(),
                description.empty() ? "%s <%s>" : "%s \"%s\"",
                "updating",
                !description.empty() ? description.c_str() : aID ? aID->item : "???");
}

// Pre-operation logging hook: announce an item deletion.
void SyncSourceLogging::deleteItem(sysync::cItemID aID)
{
    std::string description = getDescription(aID->item);
    SE_LOG_INFO(getDisplayName(),
                description.empty() ? "%s <%s>" : "%s \"%s\"",
                "deleting",
                !description.empty() ? description.c_str() : aID->item);
}

// Attach the logging hooks as pre-signals of the item operations.
void SyncSourceLogging::init(const std::list &fields,
                             const std::string &sep,
                             SyncSource::Operations &ops)
{
    m_fields = fields;
    m_sep = sep;
    ops.m_insertItemAsKey.getPreSignal().connect(boost::bind(&SyncSourceLogging::insertItemAsKey,
                                                             this, _2, _3));
    ops.m_updateItemAsKey.getPreSignal().connect(boost::bind(&SyncSourceLogging::updateItemAsKey,
                                                             this, _2, _3, _4));
    ops.m_deleteItem.getPreSignal().connect(boost::bind(&SyncSourceLogging::deleteItem,
                                                        this, _2));
}

// Engine callback: hand the stored admin blob back to the engine
// (unescaped, engine-owned memory) and reset the map iterator.
sysync::TSyError SyncSourceAdmin::loadAdminData(const char *aLocDB,
                                                const char *aRemDB,
                                                char **adminData)
{
    std::string data = m_configNode->readProperty(m_adminPropertyName);
    *adminData = StrAlloc(StringEscape::unescape(data, '!').c_str());
    resetMap();
    return sysync::LOCERR_OK;
}

// Engine callback: store the admin blob, escaped for the INI format.
sysync::TSyError SyncSourceAdmin::saveAdminData(const char *adminData)
{
    m_configNode->setProperty(m_adminPropertyName,
                              StringEscape::escape(adminData, '!', StringEscape::INI_VALUE));
    // Flush here, because some calls to saveAdminData() happen
    // after SyncSourceAdmin::flush() (= session end).
    m_configNode->flush();
    return sysync::LOCERR_OK;
}

// Engine callback: iterate over the local/remote ID mapping.
bool SyncSourceAdmin::readNextMapItem(sysync::MapID mID, bool aFirst)
{
    if (aFirst) {
        resetMap();
    }
    if (m_mappingIterator != m_mapping.end()) {
        entry2mapid(m_mappingIterator->first, m_mappingIterator->second, mID);
        ++m_mappingIterator;
        return true;
    } else {
        return false;
    }
}

// Engine callback: add a map item; duplicates currently overwrite
// (see disabled strict check in the #if 0 branch).
sysync::TSyError SyncSourceAdmin::insertMapItem(sysync::cMapID mID)
{
    string key, value;
    mapid2entry(mID, key, value);

#if 0
    StringMap::iterator it = m_mapping.find(key);
    if (it != m_mapping.end()) {
        // error, exists already
        return sysync::DB_Forbidden;
    } else {
        m_mapping[key] = value;
        return sysync::LOCERR_OK;
    }
#else
    m_mapping[key] = value;
    m_mappingNode->clear();
    m_mappingNode->writeProperties(m_mapping);
    m_mappingNode->flush();
    return sysync::LOCERR_OK;
#endif
}

// Engine callback: update an existing map item (must exist).
sysync::TSyError SyncSourceAdmin::updateMapItem(sysync::cMapID mID)
{
    string key, value;
    mapid2entry(mID, key, value);

    ConfigProps::iterator it = m_mapping.find(key);
    if (it == m_mapping.end()) {
        // error, does not exist
        return sysync::DB_Forbidden;
    } else {
        m_mapping[key] = value;
        m_mappingNode->clear();
        m_mappingNode->writeProperties(m_mapping);
        m_mappingNode->flush();
        return sysync::LOCERR_OK;
    }
}

// Engine callback: delete a map item (must exist).
sysync::TSyError SyncSourceAdmin::deleteMapItem(sysync::cMapID mID)
{
    string key, value;
    mapid2entry(mID, key, value);

    ConfigProps::iterator it = m_mapping.find(key);
    if (it == m_mapping.end()) {
        // error, does not exist
        return sysync::DB_Forbidden;
    } else {
        m_mapping.erase(it);
        m_mappingNode->clear();
        m_mappingNode->writeProperties(m_mapping);
        m_mappingNode->flush();
        return sysync::LOCERR_OK;
    }
}

// Persist admin property and (if loaded) the ID mapping.
void SyncSourceAdmin::flush()
{
    m_configNode->flush();
    if (m_mappingLoaded) {
        m_mappingNode->clear();
        m_mappingNode->writeProperties(m_mapping);
        m_mappingNode->flush();
    }
}

// (Re)load the mapping from its config node and rewind the iterator.
void SyncSourceAdmin::resetMap()
{
    m_mapping.clear();
    m_mappingNode->readProperties(m_mapping);
    m_mappingIterator = m_mapping.begin();
    m_mappingLoaded = true;
}

void
SyncSourceAdmin::mapid2entry(sysync::cMapID mID, string &key, string &value) { key = StringPrintf("%s-%x", StringEscape::escape(mID->localID ? mID->localID : "", '!', StringEscape::INI_WORD).c_str(), mID->ident); if (mID->remoteID && mID->remoteID[0]) { value = StringPrintf("%s %x", StringEscape::escape(mID->remoteID ? mID->remoteID : "", '!', StringEscape::INI_WORD).c_str(), mID->flags); } else { value = StringPrintf("%x", mID->flags); } } void SyncSourceAdmin::entry2mapid(const string &key, const string &value, sysync::MapID mID) { size_t found = key.rfind('-'); mID->localID = StrAlloc(StringEscape::unescape(key.substr(0,found), '!').c_str()); if (found != key.npos) { mID->ident = strtol(key.substr(found+1).c_str(), NULL, 16); } else { mID->ident = 0; } std::vector< std::string > tokens; boost::split(tokens, value, boost::is_from_range(' ', ' ')); if (tokens.size() >= 2) { // if branch from mapid2entry above mID->remoteID = StrAlloc(StringEscape::unescape(tokens[0], '!').c_str()); mID->flags = strtol(tokens[1].c_str(), NULL, 16); } else { // else branch from above mID->remoteID = NULL; mID->flags = strtol(tokens[0].c_str(), NULL, 16); } } void SyncSourceAdmin::init(SyncSource::Operations &ops, const boost::shared_ptr &config, const std::string adminPropertyName, const boost::shared_ptr &mapping) { m_configNode = config; m_adminPropertyName = adminPropertyName; m_mappingNode = mapping; m_mappingLoaded = false; ops.m_loadAdminData = boost::bind(&SyncSourceAdmin::loadAdminData, this, _1, _2, _3); ops.m_saveAdminData = boost::bind(&SyncSourceAdmin::saveAdminData, this, _1); ops.m_readNextMapItem = boost::bind(&SyncSourceAdmin::readNextMapItem, this, _1, _2); ops.m_insertMapItem = boost::bind(&SyncSourceAdmin::insertMapItem, this, _1); ops.m_updateMapItem = boost::bind(&SyncSourceAdmin::updateMapItem, this, _1); ops.m_deleteMapItem = boost::bind(&SyncSourceAdmin::deleteMapItem, this, _1); ops.m_endDataWrite.getPostSignal().connect(boost::bind(&SyncSourceAdmin::flush, 
this)); } void SyncSourceAdmin::init(SyncSource::Operations &ops, SyncSource *source) { init(ops, source->getProperties(true), SourceAdminDataName, source->getServerNode()); } void SyncSourceBlob::init(SyncSource::Operations &ops, const std::string &dir) { m_blob.Init(getSynthesisAPI(), getName().c_str(), dir, "", "", ""); ops.m_readBlob = boost::bind(&SyncSourceBlob::readBlob, this, _1, _2, _3, _4, _5, _6, _7); ops.m_writeBlob = boost::bind(&SyncSourceBlob::writeBlob, this, _1, _2, _3, _4, _5, _6, _7); ops.m_deleteBlob = boost::bind(&SyncSourceBlob::deleteBlob, this, _1, _2); } void TestingSyncSource::removeAllItems() { // remove longest luids first: // for luid=UID[+RECURRENCE-ID] that will // remove children from a merged event first, // which is better supported by certain servers Items_t items = getAllItems(); for (Items_t::reverse_iterator it = items.rbegin(); it != items.rend(); ++it) { deleteItem(*it); } } #ifdef ENABLE_UNIT_TESTS class SyncSourceTest : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(SyncSourceTest); CPPUNIT_TEST(backendsAvailable); CPPUNIT_TEST_SUITE_END(); void backendsAvailable() { //We expect backendsInfo() to be empty if !ENABLE_MODULES //Otherwise, there should be at least some backends. #ifdef ENABLE_MODULES CPPUNIT_ASSERT( !SyncSource::backendsInfo().empty() ); #endif } }; SYNCEVOLUTION_TEST_SUITE_REGISTRATION(SyncSourceTest); #endif // ENABLE_UNIT_TESTS SE_END_CXX