diff --git a/.github/workflows/mac_ios.yml b/.github/workflows/mac_ios.yml index c92f829b..36222741 100644 --- a/.github/workflows/mac_ios.yml +++ b/.github/workflows/mac_ios.yml @@ -8,8 +8,8 @@ on: jobs: build: - name: Build and Test default scheme using any available iPhone simulator - runs-on: macos-13 + name: Build and Test default scheme using an iPhone simulator + runs-on: macos-14 # https://github.com/actions/virtual-environments/blob/main/images/macos/macos-13-Readme.md # target macOS 12.4 isn't supported in default Xcode for macOS 12 # - default Xcode version is 14.3.1 for macos-13 builder image @@ -54,7 +54,7 @@ jobs: - name: iOS build for MeetingNotes # Xcode 15.2 settings - run: xcodebuild clean build -scheme 'MeetingNotes' -destination 'platform=iOS Simulator,OS=17.0.1,name=Any iOS Simulator Device' -sdk iphonesimulator17.2 -showBuildTimingSummary + run: xcodebuild clean build -scheme 'MeetingNotes' -destination 'platform=iOS Simulator,OS=17.0.1,name=iPhone 14' -sdk iphonesimulator17.2 -showBuildTimingSummary - name: iOS test # Xcode 15.2 settings diff --git a/MeetingNotes.xcodeproj/project.pbxproj b/MeetingNotes.xcodeproj/project.pbxproj index 8a16ec54..0fdb7f83 100644 --- a/MeetingNotes.xcodeproj/project.pbxproj +++ b/MeetingNotes.xcodeproj/project.pbxproj @@ -3,7 +3,7 @@ archiveVersion = 1; classes = { }; - objectVersion = 56; + objectVersion = 60; objects = { /* Begin PBXBuildFile section */ @@ -15,48 +15,27 @@ 1A0DDC4A2A464DEB001ECADD /* MeetingNotesTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1A0DDC492A464DEB001ECADD /* MeetingNotesTests.swift */; }; 1A0DDC542A464DEB001ECADD /* MeetingNotesUITests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1A0DDC532A464DEB001ECADD /* MeetingNotesUITests.swift */; }; 1A0DDC562A464DEB001ECADD /* MeetingNotesUITestsLaunchTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1A0DDC552A464DEB001ECADD /* MeetingNotesUITestsLaunchTests.swift */; }; - 1A11B8922B7AC37100CB4CA9 /* 
URLSessionWebSocketTask+sendPing.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1A11B8912B7AC37100CB4CA9 /* URLSessionWebSocketTask+sendPing.swift */; }; - 1A2521442B1567F20096951D /* DocumentId.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1A2521432B1567F20096951D /* DocumentId.swift */; }; + 1A273DC32B93EC8D00B321C5 /* AutomergeRepo in Frameworks */ = {isa = PBXBuildFile; productRef = 1A273DC22B93EC8D00B321C5 /* AutomergeRepo */; }; + 1A273DC62B93ECF400B321C5 /* AutomergeRepo in Frameworks */ = {isa = PBXBuildFile; productRef = 1A273DC52B93ECF400B321C5 /* AutomergeRepo */; }; + 1A273DC92B93EE9300B321C5 /* PotentCodables in Frameworks */ = {isa = PBXBuildFile; productRef = 1A273DC82B93EE9300B321C5 /* PotentCodables */; }; + 1A273DCC2B93EEA600B321C5 /* Base58Swift in Frameworks */ = {isa = PBXBuildFile; productRef = 1A273DCB2B93EEA600B321C5 /* Base58Swift */; }; + 1A273DCF2B93EEBB00B321C5 /* Automerge in Frameworks */ = {isa = PBXBuildFile; productRef = 1A273DCE2B93EEBB00B321C5 /* Automerge */; }; + 1A273DD12B93EEE200B321C5 /* PotentCodables in Frameworks */ = {isa = PBXBuildFile; productRef = 1A273DD02B93EEE200B321C5 /* PotentCodables */; }; + 1A273DD32B93EEEC00B321C5 /* Base58Swift in Frameworks */ = {isa = PBXBuildFile; productRef = 1A273DD22B93EEEC00B321C5 /* Base58Swift */; }; + 1A273DD52B93EEF700B321C5 /* Automerge in Frameworks */ = {isa = PBXBuildFile; productRef = 1A273DD42B93EEF700B321C5 /* Automerge */; }; + 1A273DD72B93F64500B321C5 /* Logger+extensions.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1A273DD62B93F64500B321C5 /* Logger+extensions.swift */; }; 1A2A02A52A50E74B0044064B /* EditableAgendaItemView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1A2A02A42A50E74A0044064B /* EditableAgendaItemView.swift */; }; 1A2AD0312A7437E200EF0C5F /* SyncConnectionView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1A2AD0302A7437E200EF0C5F /* SyncConnectionView.swift */; }; - 1A30C4DB2B155ECF007CE4F0 /* Data+hexEncodedString.swift 
in Sources */ = {isa = PBXBuildFile; fileRef = 1A30C4DA2B155ECF007CE4F0 /* Data+hexEncodedString.swift */; }; - 1A30C4DD2B155F07007CE4F0 /* UUID+bs58String.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1A30C4DC2B155F07007CE4F0 /* UUID+bs58String.swift */; }; - 1A4A610C2A6F05BE00E097F1 /* TimeInterval+milliseconds.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1A4A610B2A6F05BE00E097F1 /* TimeInterval+milliseconds.swift */; }; - 1A5F2D3B2B8E70F300C9417D /* CBORDecodingTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1A5F2D3A2B8E70F300C9417D /* CBORDecodingTests.swift */; }; - 1A6647852AF986990041C134 /* Base58Swift in Frameworks */ = {isa = PBXBuildFile; productRef = 1A6647842AF986990041C134 /* Base58Swift */; }; - 1A6647882AF987A00041C134 /* Base58Swift in Frameworks */ = {isa = PBXBuildFile; productRef = 1A6647872AF987A00041C134 /* Base58Swift */; }; - 1A66478A2AF987A60041C134 /* Automerge in Frameworks */ = {isa = PBXBuildFile; productRef = 1A6647892AF987A60041C134 /* Automerge */; }; - 1A6CB2652B71CABF00CA23E9 /* String+hexEncoding.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1A6CB2642B71CABF00CA23E9 /* String+hexEncoding.swift */; }; 1A6FF21D2B64710700C99F81 /* WebSocketStatusView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1A6FF21C2B64710700C99F81 /* WebSocketStatusView.swift */; }; 1A7700C52A67343800869A4D /* PeerSyncView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1A7700C42A67343800869A4D /* PeerSyncView.swift */; }; 1A7700C72A67479F00869A4D /* NWBrowserResultItemView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1A7700C62A67479F00869A4D /* NWBrowserResultItemView.swift */; }; - 1A8217AD2A6877730071DD38 /* DocumentSyncCoordinator.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1A8217AC2A6877730071DD38 /* DocumentSyncCoordinator.swift */; }; - 1A9272E32B61A529000CB9E5 /* WebsocketSyncConnection.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1A9272E22B61A529000CB9E5 /* 
WebsocketSyncConnection.swift */; }; - 1A9272E62B61AC2C000CB9E5 /* WebSocketMessages.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1A9272E52B61AC2C000CB9E5 /* WebSocketMessages.swift */; }; - 1AADE1F42B586801000205BB /* PotentCodables in Frameworks */ = {isa = PBXBuildFile; productRef = 1AADE1F32B586801000205BB /* PotentCodables */; }; - 1AADE1F52B586875000205BB /* Data+hexEncodedString.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1A30C4DA2B155ECF007CE4F0 /* Data+hexEncodedString.swift */; }; - 1AADE1F62B586878000205BB /* UUID+bs58String.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1A30C4DC2B155F07007CE4F0 /* UUID+bs58String.swift */; }; - 1AADE1F72B58687E000205BB /* DocumentId.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1A2521432B1567F20096951D /* DocumentId.swift */; }; + 1A9D231A2B940D23007F3A16 /* AutomergeRepo in Frameworks */ = {isa = PBXBuildFile; productRef = 1A9D23192B940D23007F3A16 /* AutomergeRepo */; }; 1AC103972B7EB0EF0099296C /* PrivacyInfo.xcprivacy in Resources */ = {isa = PBXBuildFile; fileRef = 1AC103962B7EB0EF0099296C /* PrivacyInfo.xcprivacy */; }; - 1ACF1CE12B6DCAFC00DC5198 /* Task+timeout.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1ACF1CE02B6DCAFC00DC5198 /* Task+timeout.swift */; }; - 1AD5DA332A464FBD0085DF79 /* Automerge in Frameworks */ = {isa = PBXBuildFile; productRef = 1AD5DA322A464FBD0085DF79 /* Automerge */; }; 1AD5DA352A4650520085DF79 /* MeetingNotesModel.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1AD5DA342A4650520085DF79 /* MeetingNotesModel.swift */; }; 1AD71E8E2A57622B00B965BF /* MeetingNotesDocumentView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1AD71E8D2A57622B00B965BF /* MeetingNotesDocumentView.swift */; }; 1AD71E912A57630B00B965BF /* MergeView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1AD71E902A57630B00B965BF /* MergeView.swift */; }; 1AD71E932A5765A800B965BF /* SyncStatusView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1AD71E922A5765A800B965BF 
/* SyncStatusView.swift */; }; - 1AD7A8E52A4767A1003B6A1E /* PotentCodables in Frameworks */ = {isa = PBXBuildFile; productRef = 1AD7A8E42A4767A1003B6A1E /* PotentCodables */; }; - 1AD7A8EB2A476801003B6A1E /* NWParameters+peerSyncParameters.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1AD7A8E62A476801003B6A1E /* NWParameters+peerSyncParameters.swift */; }; - 1AD7A8ED2A476801003B6A1E /* P2PAutomergeSyncProtocol.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1AD7A8E82A476801003B6A1E /* P2PAutomergeSyncProtocol.swift */; }; - 1AD7A8EF2A476801003B6A1E /* SyncConnection.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1AD7A8EA2A476801003B6A1E /* SyncConnection.swift */; }; - 1ADE51672B7A90AA00EA44F0 /* WebSocketSyncIntegrationTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1ADE51662B7A90AA00EA44F0 /* WebSocketSyncIntegrationTests.swift */; }; - 1ADE51682B7A94B100EA44F0 /* String+hexEncoding.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1A6CB2642B71CABF00CA23E9 /* String+hexEncoding.swift */; }; - 1ADE51692B7A94BC00EA44F0 /* Task+timeout.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1ACF1CE02B6DCAFC00DC5198 /* Task+timeout.swift */; }; - 1ADE516A2B7A94BC00EA44F0 /* WebSocketMessages.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1A9272E52B61AC2C000CB9E5 /* WebSocketMessages.swift */; }; - 1ADE516B2B7A94BC00EA44F0 /* WebsocketSyncConnection.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1A9272E22B61A529000CB9E5 /* WebsocketSyncConnection.swift */; }; - 1ADE516C2B7A953500EA44F0 /* OSLog+extensions.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1AE3D77A2A68646A00CEA52E /* OSLog+extensions.swift */; }; - 1AE3D77B2A68646A00CEA52E /* OSLog+extensions.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1AE3D77A2A68646A00CEA52E /* OSLog+extensions.swift */; }; 1AF4DDDA2B7C57E800B23BF8 /* ExportView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1AF4DDD92B7C57E800B23BF8 /* ExportView.swift */; }; - 
1AF9017F2B66E47A0077567D /* CBORExperiments.swift in Sources */ = {isa = PBXBuildFile; fileRef = 1AF9017E2B66E47A0077567D /* CBORExperiments.swift */; }; /* End PBXBuildFile section */ /* Begin PBXContainerItemProxy section */ @@ -91,38 +70,21 @@ 1A0DDC532A464DEB001ECADD /* MeetingNotesUITests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = MeetingNotesUITests.swift; sourceTree = ""; }; 1A0DDC552A464DEB001ECADD /* MeetingNotesUITestsLaunchTests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = MeetingNotesUITestsLaunchTests.swift; sourceTree = ""; }; 1A0DDC622A464E2D001ECADD /* MeetingNotes.xctestplan */ = {isa = PBXFileReference; lastKnownFileType = text; path = MeetingNotes.xctestplan; sourceTree = ""; }; - 1A11B8912B7AC37100CB4CA9 /* URLSessionWebSocketTask+sendPing.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "URLSessionWebSocketTask+sendPing.swift"; sourceTree = ""; }; - 1A2521432B1567F20096951D /* DocumentId.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = DocumentId.swift; sourceTree = ""; }; - 1A27AB422B768C0300A29BC3 /* Automerge-swift-privacy-manifest.bundle */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.plug-in"; path = "Automerge-swift-privacy-manifest.bundle"; sourceTree = ""; }; + 1A273DC02B93EBD000B321C5 /* Packages */ = {isa = PBXFileReference; lastKnownFileType = folder; path = Packages; sourceTree = ""; }; + 1A273DD62B93F64500B321C5 /* Logger+extensions.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "Logger+extensions.swift"; sourceTree = ""; }; 1A2A02A42A50E74A0044064B /* EditableAgendaItemView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = EditableAgendaItemView.swift; sourceTree = ""; }; 1A2AD0302A7437E200EF0C5F /* SyncConnectionView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = 
SyncConnectionView.swift; sourceTree = ""; }; - 1A30C4DA2B155ECF007CE4F0 /* Data+hexEncodedString.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "Data+hexEncodedString.swift"; sourceTree = ""; }; - 1A30C4DC2B155F07007CE4F0 /* UUID+bs58String.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "UUID+bs58String.swift"; sourceTree = ""; }; - 1A4A610B2A6F05BE00E097F1 /* TimeInterval+milliseconds.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "TimeInterval+milliseconds.swift"; sourceTree = ""; }; - 1A5F2D3A2B8E70F300C9417D /* CBORDecodingTests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = CBORDecodingTests.swift; sourceTree = ""; }; - 1A6CB2642B71CABF00CA23E9 /* String+hexEncoding.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "String+hexEncoding.swift"; sourceTree = ""; }; 1A6FF21C2B64710700C99F81 /* WebSocketStatusView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = WebSocketStatusView.swift; sourceTree = ""; }; 1A7700C42A67343800869A4D /* PeerSyncView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = PeerSyncView.swift; sourceTree = ""; }; 1A7700C62A67479F00869A4D /* NWBrowserResultItemView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = NWBrowserResultItemView.swift; sourceTree = ""; }; - 1A8217AC2A6877730071DD38 /* DocumentSyncCoordinator.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = DocumentSyncCoordinator.swift; sourceTree = ""; }; - 1A9272E22B61A529000CB9E5 /* WebsocketSyncConnection.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = WebsocketSyncConnection.swift; sourceTree = ""; }; - 1A9272E52B61AC2C000CB9E5 /* WebSocketMessages.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = WebSocketMessages.swift; 
sourceTree = ""; }; - 1AA529342B6D973C0084F855 /* notes */ = {isa = PBXFileReference; lastKnownFileType = folder; path = notes; sourceTree = ""; }; 1AC103962B7EB0EF0099296C /* PrivacyInfo.xcprivacy */ = {isa = PBXFileReference; lastKnownFileType = text.xml; path = PrivacyInfo.xcprivacy; sourceTree = ""; }; - 1ACF1CE02B6DCAFC00DC5198 /* Task+timeout.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "Task+timeout.swift"; sourceTree = ""; }; 1AD5DA342A4650520085DF79 /* MeetingNotesModel.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = MeetingNotesModel.swift; sourceTree = ""; }; 1AD71E8D2A57622B00B965BF /* MeetingNotesDocumentView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = MeetingNotesDocumentView.swift; sourceTree = ""; }; 1AD71E902A57630B00B965BF /* MergeView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = MergeView.swift; sourceTree = ""; }; 1AD71E922A5765A800B965BF /* SyncStatusView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = SyncStatusView.swift; sourceTree = ""; }; - 1AD7A8E62A476801003B6A1E /* NWParameters+peerSyncParameters.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = "NWParameters+peerSyncParameters.swift"; sourceTree = ""; }; - 1AD7A8E82A476801003B6A1E /* P2PAutomergeSyncProtocol.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = P2PAutomergeSyncProtocol.swift; sourceTree = ""; }; - 1AD7A8EA2A476801003B6A1E /* SyncConnection.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = SyncConnection.swift; sourceTree = ""; }; - 1ADE51662B7A90AA00EA44F0 /* WebSocketSyncIntegrationTests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = WebSocketSyncIntegrationTests.swift; sourceTree = ""; }; - 
1AE3D77A2A68646A00CEA52E /* OSLog+extensions.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "OSLog+extensions.swift"; sourceTree = ""; }; 1AF4DDD92B7C57E800B23BF8 /* ExportView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ExportView.swift; sourceTree = ""; }; 1AF5DB3A2A4A0C38008DAC6F /* README.md */ = {isa = PBXFileReference; lastKnownFileType = net.daringfireball.markdown; path = README.md; sourceTree = ""; }; 1AF5DB3B2A4A0C5E008DAC6F /* CONTRIBUTING.md */ = {isa = PBXFileReference; lastKnownFileType = net.daringfireball.markdown; path = CONTRIBUTING.md; sourceTree = ""; }; - 1AF9017E2B66E47A0077567D /* CBORExperiments.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = CBORExperiments.swift; sourceTree = ""; }; /* End PBXFileReference section */ /* Begin PBXFrameworksBuildPhase section */ @@ -130,9 +92,11 @@ isa = PBXFrameworksBuildPhase; buildActionMask = 2147483647; files = ( - 1A6647852AF986990041C134 /* Base58Swift in Frameworks */, - 1AD5DA332A464FBD0085DF79 /* Automerge in Frameworks */, - 1AD7A8E52A4767A1003B6A1E /* PotentCodables in Frameworks */, + 1A273DCC2B93EEA600B321C5 /* Base58Swift in Frameworks */, + 1A273DCF2B93EEBB00B321C5 /* Automerge in Frameworks */, + 1A273DC92B93EE9300B321C5 /* PotentCodables in Frameworks */, + 1A273DC32B93EC8D00B321C5 /* AutomergeRepo in Frameworks */, + 1A273DC62B93ECF400B321C5 /* AutomergeRepo in Frameworks */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -140,9 +104,10 @@ isa = PBXFrameworksBuildPhase; buildActionMask = 2147483647; files = ( - 1AADE1F42B586801000205BB /* PotentCodables in Frameworks */, - 1A66478A2AF987A60041C134 /* Automerge in Frameworks */, - 1A6647882AF987A00041C134 /* Base58Swift in Frameworks */, + 1A9D231A2B940D23007F3A16 /* AutomergeRepo in Frameworks */, + 1A273DD32B93EEEC00B321C5 /* Base58Swift in Frameworks */, + 1A273DD12B93EEE200B321C5 /* PotentCodables in Frameworks */, + 
1A273DD52B93EEF700B321C5 /* Automerge in Frameworks */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -162,8 +127,8 @@ 1AC103962B7EB0EF0099296C /* PrivacyInfo.xcprivacy */, 1AF5DB3A2A4A0C38008DAC6F /* README.md */, 1AF5DB3B2A4A0C5E008DAC6F /* CONTRIBUTING.md */, - 1AA529342B6D973C0084F855 /* notes */, 1A0DDC622A464E2D001ECADD /* MeetingNotes.xctestplan */, + 1A273DC02B93EBD000B321C5 /* Packages */, 1A0DDC332A464DEA001ECADD /* MeetingNotes */, 1A0DDC482A464DEB001ECADD /* MeetingNotesTests */, 1A0DDC522A464DEB001ECADD /* MeetingNotesUITests */, @@ -187,12 +152,6 @@ children = ( 1A0DDC342A464DEA001ECADD /* MeetingNotesApp.swift */, 1AB369012A50D82C00F855F8 /* Views */, - 1A74051D2B6C3BE100E88B18 /* Sync */, - 1AE3D77A2A68646A00CEA52E /* OSLog+extensions.swift */, - 1A30C4DA2B155ECF007CE4F0 /* Data+hexEncodedString.swift */, - 1A6CB2642B71CABF00CA23E9 /* String+hexEncoding.swift */, - 1A30C4DC2B155F07007CE4F0 /* UUID+bs58String.swift */, - 1A2521432B1567F20096951D /* DocumentId.swift */, 1A0DDC362A464DEA001ECADD /* MeetingNotesDocument.swift */, 1AD5DA342A4650520085DF79 /* MeetingNotesModel.swift */, 1A0916FF2A4A171C00D80BF7 /* Documentation.docc */, @@ -200,7 +159,7 @@ 1A0DDC3E2A464DEB001ECADD /* Preview Content */, 1A0DDC3C2A464DEB001ECADD /* Info.plist */, 1A0DDC3D2A464DEB001ECADD /* MeetingNotes.entitlements */, - 1A27AB422B768C0300A29BC3 /* Automerge-swift-privacy-manifest.bundle */, + 1A273DD62B93F64500B321C5 /* Logger+extensions.swift */, ); path = MeetingNotes; sourceTree = ""; @@ -217,10 +176,6 @@ isa = PBXGroup; children = ( 1A0DDC492A464DEB001ECADD /* MeetingNotesTests.swift */, - 1AF9017E2B66E47A0077567D /* CBORExperiments.swift */, - 1A5F2D3A2B8E70F300C9417D /* CBORDecodingTests.swift */, - 1ADE51662B7A90AA00EA44F0 /* WebSocketSyncIntegrationTests.swift */, - 1A11B8912B7AC37100CB4CA9 /* URLSessionWebSocketTask+sendPing.swift */, ); path = MeetingNotesTests; sourceTree = ""; @@ -241,26 +196,6 @@ name = Frameworks; sourceTree = ""; }; - 
1A74051D2B6C3BE100E88B18 /* Sync */ = { - isa = PBXGroup; - children = ( - 1A8217AC2A6877730071DD38 /* DocumentSyncCoordinator.swift */, - 1AD7A8F02A476807003B6A1E /* PeerNetworking */, - 1A9272E42B61A800000CB9E5 /* WebSocketNetworking */, - ); - path = Sync; - sourceTree = ""; - }; - 1A9272E42B61A800000CB9E5 /* WebSocketNetworking */ = { - isa = PBXGroup; - children = ( - 1A9272E22B61A529000CB9E5 /* WebsocketSyncConnection.swift */, - 1A9272E52B61AC2C000CB9E5 /* WebSocketMessages.swift */, - 1ACF1CE02B6DCAFC00DC5198 /* Task+timeout.swift */, - ); - path = WebSocketNetworking; - sourceTree = ""; - }; 1AB369012A50D82C00F855F8 /* Views */ = { isa = PBXGroup; children = ( @@ -277,17 +212,6 @@ path = Views; sourceTree = ""; }; - 1AD7A8F02A476807003B6A1E /* PeerNetworking */ = { - isa = PBXGroup; - children = ( - 1AD7A8E62A476801003B6A1E /* NWParameters+peerSyncParameters.swift */, - 1A4A610B2A6F05BE00E097F1 /* TimeInterval+milliseconds.swift */, - 1AD7A8E82A476801003B6A1E /* P2PAutomergeSyncProtocol.swift */, - 1AD7A8EA2A476801003B6A1E /* SyncConnection.swift */, - ); - path = PeerNetworking; - sourceTree = ""; - }; /* End PBXGroup section */ /* Begin PBXNativeTarget section */ @@ -305,9 +229,11 @@ ); name = MeetingNotes; packageProductDependencies = ( - 1AD5DA322A464FBD0085DF79 /* Automerge */, - 1AD7A8E42A4767A1003B6A1E /* PotentCodables */, - 1A6647842AF986990041C134 /* Base58Swift */, + 1A273DC22B93EC8D00B321C5 /* AutomergeRepo */, + 1A273DC52B93ECF400B321C5 /* AutomergeRepo */, + 1A273DC82B93EE9300B321C5 /* PotentCodables */, + 1A273DCB2B93EEA600B321C5 /* Base58Swift */, + 1A273DCE2B93EEBB00B321C5 /* Automerge */, ); productName = MeetingNotes; productReference = 1A0DDC312A464DEA001ECADD /* MeetingNotes.app */; @@ -328,9 +254,10 @@ ); name = MeetingNotesTests; packageProductDependencies = ( - 1A6647872AF987A00041C134 /* Base58Swift */, - 1A6647892AF987A60041C134 /* Automerge */, - 1AADE1F32B586801000205BB /* PotentCodables */, + 1A273DD02B93EEE200B321C5 /* 
PotentCodables */, + 1A273DD22B93EEEC00B321C5 /* Base58Swift */, + 1A273DD42B93EEF700B321C5 /* Automerge */, + 1A9D23192B940D23007F3A16 /* AutomergeRepo */, ); productName = MeetingNotesTests; productReference = 1A0DDC452A464DEB001ECADD /* MeetingNotesTests.xctest */; @@ -387,9 +314,10 @@ ); mainGroup = 1A0DDC282A464DEA001ECADD; packageReferences = ( - 1AD5DA312A464FBD0085DF79 /* XCRemoteSwiftPackageReference "automerge-swift" */, - 1AD7A8E32A4767A1003B6A1E /* XCRemoteSwiftPackageReference "PotentCodables" */, - 1A6647832AF986990041C134 /* XCRemoteSwiftPackageReference "Base58Swift" */, + 1A273DC42B93ECF400B321C5 /* XCLocalSwiftPackageReference "Packages/automerge-repo" */, + 1A273DC72B93EE9300B321C5 /* XCRemoteSwiftPackageReference "PotentCodables" */, + 1A273DCA2B93EEA600B321C5 /* XCRemoteSwiftPackageReference "Base58Swift" */, + 1A273DCD2B93EEBA00B321C5 /* XCRemoteSwiftPackageReference "automerge-swift" */, ); productRefGroup = 1A0DDC322A464DEA001ECADD /* Products */; projectDirPath = ""; @@ -434,32 +362,20 @@ isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( - 1AE3D77B2A68646A00CEA52E /* OSLog+extensions.swift in Sources */, - 1A9272E62B61AC2C000CB9E5 /* WebSocketMessages.swift in Sources */, 1A0DDC372A464DEA001ECADD /* MeetingNotesDocument.swift in Sources */, 1AD5DA352A4650520085DF79 /* MeetingNotesModel.swift in Sources */, 1A2AD0312A7437E200EF0C5F /* SyncConnectionView.swift in Sources */, 1A2A02A52A50E74B0044064B /* EditableAgendaItemView.swift in Sources */, - 1AD7A8ED2A476801003B6A1E /* P2PAutomergeSyncProtocol.swift in Sources */, 1A0917002A4A171C00D80BF7 /* Documentation.docc in Sources */, - 1A30C4DD2B155F07007CE4F0 /* UUID+bs58String.swift in Sources */, - 1A30C4DB2B155ECF007CE4F0 /* Data+hexEncodedString.swift in Sources */, 1AD71E912A57630B00B965BF /* MergeView.swift in Sources */, 1A6FF21D2B64710700C99F81 /* WebSocketStatusView.swift in Sources */, 1A0DDC352A464DEA001ECADD /* MeetingNotesApp.swift in Sources */, 
1AD71E8E2A57622B00B965BF /* MeetingNotesDocumentView.swift in Sources */, - 1A8217AD2A6877730071DD38 /* DocumentSyncCoordinator.swift in Sources */, - 1A9272E32B61A529000CB9E5 /* WebsocketSyncConnection.swift in Sources */, + 1A273DD72B93F64500B321C5 /* Logger+extensions.swift in Sources */, 1A7700C72A67479F00869A4D /* NWBrowserResultItemView.swift in Sources */, 1AD71E932A5765A800B965BF /* SyncStatusView.swift in Sources */, - 1A6CB2652B71CABF00CA23E9 /* String+hexEncoding.swift in Sources */, - 1ACF1CE12B6DCAFC00DC5198 /* Task+timeout.swift in Sources */, - 1AD7A8EF2A476801003B6A1E /* SyncConnection.swift in Sources */, 1A7700C52A67343800869A4D /* PeerSyncView.swift in Sources */, 1AF4DDDA2B7C57E800B23BF8 /* ExportView.swift in Sources */, - 1AD7A8EB2A476801003B6A1E /* NWParameters+peerSyncParameters.swift in Sources */, - 1A4A610C2A6F05BE00E097F1 /* TimeInterval+milliseconds.swift in Sources */, - 1A2521442B1567F20096951D /* DocumentId.swift in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -468,18 +384,6 @@ buildActionMask = 2147483647; files = ( 1A0DDC4A2A464DEB001ECADD /* MeetingNotesTests.swift in Sources */, - 1ADE51672B7A90AA00EA44F0 /* WebSocketSyncIntegrationTests.swift in Sources */, - 1ADE516C2B7A953500EA44F0 /* OSLog+extensions.swift in Sources */, - 1AADE1F52B586875000205BB /* Data+hexEncodedString.swift in Sources */, - 1AADE1F62B586878000205BB /* UUID+bs58String.swift in Sources */, - 1AADE1F72B58687E000205BB /* DocumentId.swift in Sources */, - 1AF9017F2B66E47A0077567D /* CBORExperiments.swift in Sources */, - 1A11B8922B7AC37100CB4CA9 /* URLSessionWebSocketTask+sendPing.swift in Sources */, - 1ADE51682B7A94B100EA44F0 /* String+hexEncoding.swift in Sources */, - 1A5F2D3B2B8E70F300C9417D /* CBORDecodingTests.swift in Sources */, - 1ADE51692B7A94BC00EA44F0 /* Task+timeout.swift in Sources */, - 1ADE516A2B7A94BC00EA44F0 /* WebSocketMessages.swift in Sources */, - 1ADE516B2B7A94BC00EA44F0 /* WebsocketSyncConnection.swift in Sources */, ); 
runOnlyForDeploymentPostprocessing = 0; }; @@ -568,6 +472,7 @@ ONLY_ACTIVE_ARCH = YES; SWIFT_ACTIVE_COMPILATION_CONDITIONS = DEBUG; SWIFT_OPTIMIZATION_LEVEL = "-Onone"; + SWIFT_STRICT_CONCURRENCY = complete; }; name = Debug; }; @@ -624,6 +529,7 @@ MTL_FAST_MATH = YES; SWIFT_COMPILATION_MODE = wholemodule; SWIFT_OPTIMIZATION_LEVEL = "-O"; + SWIFT_STRICT_CONCURRENCY = complete; }; name = Release; }; @@ -862,63 +768,83 @@ }; /* End XCConfigurationList section */ +/* Begin XCLocalSwiftPackageReference section */ + 1A273DC42B93ECF400B321C5 /* XCLocalSwiftPackageReference "Packages/automerge-repo" */ = { + isa = XCLocalSwiftPackageReference; + relativePath = "Packages/automerge-repo"; + }; +/* End XCLocalSwiftPackageReference section */ + /* Begin XCRemoteSwiftPackageReference section */ - 1A6647832AF986990041C134 /* XCRemoteSwiftPackageReference "Base58Swift" */ = { + 1A273DC72B93EE9300B321C5 /* XCRemoteSwiftPackageReference "PotentCodables" */ = { isa = XCRemoteSwiftPackageReference; - repositoryURL = "https://github.com/keefertaylor/Base58Swift"; + repositoryURL = "https://github.com/outfoxx/PotentCodables"; requirement = { kind = upToNextMajorVersion; - minimumVersion = 2.1.14; + minimumVersion = 3.2.0; }; }; - 1AD5DA312A464FBD0085DF79 /* XCRemoteSwiftPackageReference "automerge-swift" */ = { + 1A273DCA2B93EEA600B321C5 /* XCRemoteSwiftPackageReference "Base58Swift" */ = { isa = XCRemoteSwiftPackageReference; - repositoryURL = "https://github.com/automerge/automerge-swift/"; + repositoryURL = "https://github.com/keefertaylor/Base58Swift"; requirement = { kind = upToNextMajorVersion; - minimumVersion = 0.5.0; + minimumVersion = 2.1.14; }; }; - 1AD7A8E32A4767A1003B6A1E /* XCRemoteSwiftPackageReference "PotentCodables" */ = { + 1A273DCD2B93EEBA00B321C5 /* XCRemoteSwiftPackageReference "automerge-swift" */ = { isa = XCRemoteSwiftPackageReference; - repositoryURL = "https://github.com/outfoxx/PotentCodables"; + repositoryURL = 
"https://github.com/automerge/automerge-swift/"; requirement = { kind = upToNextMajorVersion; - minimumVersion = 3.1.0; + minimumVersion = 0.5.8; }; }; /* End XCRemoteSwiftPackageReference section */ /* Begin XCSwiftPackageProductDependency section */ - 1A6647842AF986990041C134 /* Base58Swift */ = { + 1A273DC22B93EC8D00B321C5 /* AutomergeRepo */ = { isa = XCSwiftPackageProductDependency; - package = 1A6647832AF986990041C134 /* XCRemoteSwiftPackageReference "Base58Swift" */; - productName = Base58Swift; + productName = AutomergeRepo; }; - 1A6647872AF987A00041C134 /* Base58Swift */ = { + 1A273DC52B93ECF400B321C5 /* AutomergeRepo */ = { isa = XCSwiftPackageProductDependency; - package = 1A6647832AF986990041C134 /* XCRemoteSwiftPackageReference "Base58Swift" */; + productName = AutomergeRepo; + }; + 1A273DC82B93EE9300B321C5 /* PotentCodables */ = { + isa = XCSwiftPackageProductDependency; + package = 1A273DC72B93EE9300B321C5 /* XCRemoteSwiftPackageReference "PotentCodables" */; + productName = PotentCodables; + }; + 1A273DCB2B93EEA600B321C5 /* Base58Swift */ = { + isa = XCSwiftPackageProductDependency; + package = 1A273DCA2B93EEA600B321C5 /* XCRemoteSwiftPackageReference "Base58Swift" */; productName = Base58Swift; }; - 1A6647892AF987A60041C134 /* Automerge */ = { + 1A273DCE2B93EEBB00B321C5 /* Automerge */ = { isa = XCSwiftPackageProductDependency; - package = 1AD5DA312A464FBD0085DF79 /* XCRemoteSwiftPackageReference "automerge-swift" */; + package = 1A273DCD2B93EEBA00B321C5 /* XCRemoteSwiftPackageReference "automerge-swift" */; productName = Automerge; }; - 1AADE1F32B586801000205BB /* PotentCodables */ = { + 1A273DD02B93EEE200B321C5 /* PotentCodables */ = { isa = XCSwiftPackageProductDependency; - package = 1AD7A8E32A4767A1003B6A1E /* XCRemoteSwiftPackageReference "PotentCodables" */; + package = 1A273DC72B93EE9300B321C5 /* XCRemoteSwiftPackageReference "PotentCodables" */; productName = PotentCodables; }; - 1AD5DA322A464FBD0085DF79 /* Automerge */ = { + 
1A273DD22B93EEEC00B321C5 /* Base58Swift */ = { isa = XCSwiftPackageProductDependency; - package = 1AD5DA312A464FBD0085DF79 /* XCRemoteSwiftPackageReference "automerge-swift" */; + package = 1A273DCA2B93EEA600B321C5 /* XCRemoteSwiftPackageReference "Base58Swift" */; + productName = Base58Swift; + }; + 1A273DD42B93EEF700B321C5 /* Automerge */ = { + isa = XCSwiftPackageProductDependency; + package = 1A273DCD2B93EEBA00B321C5 /* XCRemoteSwiftPackageReference "automerge-swift" */; productName = Automerge; }; - 1AD7A8E42A4767A1003B6A1E /* PotentCodables */ = { + 1A9D23192B940D23007F3A16 /* AutomergeRepo */ = { isa = XCSwiftPackageProductDependency; - package = 1AD7A8E32A4767A1003B6A1E /* XCRemoteSwiftPackageReference "PotentCodables" */; - productName = PotentCodables; + package = 1A273DC42B93ECF400B321C5 /* XCLocalSwiftPackageReference "Packages/automerge-repo" */; + productName = AutomergeRepo; }; /* End XCSwiftPackageProductDependency section */ }; diff --git a/MeetingNotes/Automerge-swift-privacy-manifest.bundle/Contents/Info.plist b/MeetingNotes/Automerge-swift-privacy-manifest.bundle/Contents/Info.plist deleted file mode 100644 index 0a7e3249..00000000 --- a/MeetingNotes/Automerge-swift-privacy-manifest.bundle/Contents/Info.plist +++ /dev/null @@ -1,46 +0,0 @@ - - - - - BuildMachineOSBuild - 23D60 - CFBundleDevelopmentRegion - en - CFBundleExecutable - Automerge-swift-privacy-manifest - CFBundleIdentifier - com.rhonabwy.Automerge-swift-privacy-manifest - CFBundleInfoDictionaryVersion - 6.0 - CFBundleName - Automerge-swift-privacy-manifest - CFBundlePackageType - BNDL - CFBundleShortVersionString - 1.0 - CFBundleSupportedPlatforms - - MacOSX - - CFBundleVersion - 1 - DTCompiler - com.apple.compilers.llvm.clang.1_0 - DTPlatformBuild - - DTPlatformName - macosx - DTPlatformVersion - 14.4 - DTSDKBuild - 23E5191c - DTSDKName - macosx14.4 - DTXcode - 1530 - DTXcodeBuild - 15E5188j - LSMinimumSystemVersion - 14.3 - - diff --git 
a/MeetingNotes/Automerge-swift-privacy-manifest.bundle/Contents/Resources/PrivacyInfo.xcprivacy b/MeetingNotes/Automerge-swift-privacy-manifest.bundle/Contents/Resources/PrivacyInfo.xcprivacy deleted file mode 100644 index 8ceb89cc..00000000 --- a/MeetingNotes/Automerge-swift-privacy-manifest.bundle/Contents/Resources/PrivacyInfo.xcprivacy +++ /dev/null @@ -1,18 +0,0 @@ - - - - - NSPrivacyTracking - - NSPrivacyTrackingDomains - - NSPrivacyCollectedDataTypes - - - - NSPrivacyAccessedAPITypes - - - - - diff --git a/MeetingNotes/Automerge-swift-privacy-manifest.bundle/Contents/_CodeSignature/CodeDirectory b/MeetingNotes/Automerge-swift-privacy-manifest.bundle/Contents/_CodeSignature/CodeDirectory deleted file mode 100644 index 5135a7e2..00000000 Binary files a/MeetingNotes/Automerge-swift-privacy-manifest.bundle/Contents/_CodeSignature/CodeDirectory and /dev/null differ diff --git a/MeetingNotes/Automerge-swift-privacy-manifest.bundle/Contents/_CodeSignature/CodeRequirements b/MeetingNotes/Automerge-swift-privacy-manifest.bundle/Contents/_CodeSignature/CodeRequirements deleted file mode 100644 index dbf9d614..00000000 Binary files a/MeetingNotes/Automerge-swift-privacy-manifest.bundle/Contents/_CodeSignature/CodeRequirements and /dev/null differ diff --git a/MeetingNotes/Automerge-swift-privacy-manifest.bundle/Contents/_CodeSignature/CodeRequirements-1 b/MeetingNotes/Automerge-swift-privacy-manifest.bundle/Contents/_CodeSignature/CodeRequirements-1 deleted file mode 100644 index 240115a4..00000000 Binary files a/MeetingNotes/Automerge-swift-privacy-manifest.bundle/Contents/_CodeSignature/CodeRequirements-1 and /dev/null differ diff --git a/MeetingNotes/Automerge-swift-privacy-manifest.bundle/Contents/_CodeSignature/CodeResources b/MeetingNotes/Automerge-swift-privacy-manifest.bundle/Contents/_CodeSignature/CodeResources deleted file mode 100644 index 72320c31..00000000 --- a/MeetingNotes/Automerge-swift-privacy-manifest.bundle/Contents/_CodeSignature/CodeResources +++ 
/dev/null @@ -1,132 +0,0 @@ - - - - - files - - Resources/PrivacyInfo.xcprivacy - - DeXR/KjPyTMvgjQPRGZqmpsYC9Y= - - - files2 - - Resources/PrivacyInfo.xcprivacy - - hash - - DeXR/KjPyTMvgjQPRGZqmpsYC9Y= - - hash2 - - 0h/7j1Z8Km0J3DtYlZR2+M8urNwSWp8zjduJx79KjQo= - - - - rules - - ^Resources/ - - ^Resources/.*\.lproj/ - - optional - - weight - 1000 - - ^Resources/.*\.lproj/locversion.plist$ - - omit - - weight - 1100 - - ^Resources/Base\.lproj/ - - weight - 1010 - - ^version.plist$ - - - rules2 - - .*\.dSYM($|/) - - weight - 11 - - ^(.*/)?\.DS_Store$ - - omit - - weight - 2000 - - ^(Frameworks|SharedFrameworks|PlugIns|Plug-ins|XPCServices|Helpers|MacOS|Library/(Automator|Spotlight|LoginItems))/ - - nested - - weight - 10 - - ^.* - - ^Info\.plist$ - - omit - - weight - 20 - - ^PkgInfo$ - - omit - - weight - 20 - - ^Resources/ - - weight - 20 - - ^Resources/.*\.lproj/ - - optional - - weight - 1000 - - ^Resources/.*\.lproj/locversion.plist$ - - omit - - weight - 1100 - - ^Resources/Base\.lproj/ - - weight - 1010 - - ^[^/]+$ - - nested - - weight - 10 - - ^embedded\.provisionprofile$ - - weight - 20 - - ^version\.plist$ - - weight - 20 - - - - diff --git a/MeetingNotes/Automerge-swift-privacy-manifest.bundle/Contents/_CodeSignature/CodeSignature b/MeetingNotes/Automerge-swift-privacy-manifest.bundle/Contents/_CodeSignature/CodeSignature deleted file mode 100644 index e69de29b..00000000 diff --git a/MeetingNotes/Data+hexEncodedString.swift b/MeetingNotes/Data+hexEncodedString.swift deleted file mode 100644 index 7a986566..00000000 --- a/MeetingNotes/Data+hexEncodedString.swift +++ /dev/null @@ -1,8 +0,0 @@ -import Foundation - -public extension Data { - func hexEncodedString(uppercase: Bool = false) -> String { - let format = uppercase ? 
"%02hhX" : "%02hhx" - return map { String(format: format, $0) }.joined() - } -} diff --git a/MeetingNotes/Logger+extensions.swift b/MeetingNotes/Logger+extensions.swift new file mode 100644 index 00000000..b0d77f05 --- /dev/null +++ b/MeetingNotes/Logger+extensions.swift @@ -0,0 +1,14 @@ +import Foundation +import OSLog + +// using : @unchecked Sendable here because I think Logger _is_ sendable, +// but isn't yet marked as such. Alternatively I think we could use @preconcurrency import, +// but doing this due to the conversation on the forums (Mar2024): +// https://forums.swift.org/t/preconcurrency-doesnt-suppress-static-property-concurrency-warnings/70469/2 +extension Logger: @unchecked Sendable { + /// Using your bundle identifier is a great way to ensure a unique identifier. + private nonisolated static let subsystem = Bundle.main.bundleIdentifier! + + /// Logs the Document interactions, such as saving and loading. + static let document = Logger(subsystem: subsystem, category: "Document") +} diff --git a/MeetingNotes/MeetingNotesApp.swift b/MeetingNotes/MeetingNotesApp.swift index 757ba496..3050ab32 100644 --- a/MeetingNotes/MeetingNotesApp.swift +++ b/MeetingNotes/MeetingNotesApp.swift @@ -1,8 +1,6 @@ +import AutomergeRepo import SwiftUI -/// A shared instance of a document sync coordinator. -let sharedSyncCoordinator = DocumentSyncCoordinator() - /// The document-based Meeting Notes application. 
@main struct MeetingNotesApp: App { diff --git a/MeetingNotes/MeetingNotesDocument.swift b/MeetingNotes/MeetingNotesDocument.swift index d02ef39a..8433476b 100644 --- a/MeetingNotes/MeetingNotesDocument.swift +++ b/MeetingNotes/MeetingNotesDocument.swift @@ -1,4 +1,5 @@ import Automerge +import AutomergeRepo import Combine import OSLog import PotentCBOR @@ -19,13 +20,15 @@ extension UTType { struct WrappedAutomergeDocument: Codable { let id: DocumentId let data: Data - static let fileEncoder = CBOREncoder() - static let fileDecoder = CBORDecoder() } extension WrappedAutomergeDocument: Transferable { static var transferRepresentation: some TransferRepresentation { - CodableRepresentation(contentType: .meetingnote, encoder: fileEncoder, decoder: fileDecoder) + CodableRepresentation( + contentType: .meetingnote, + encoder: CBORCoder.encoder, + decoder: CBORCoder.decoder + ) } } diff --git a/MeetingNotes/OSLog+extensions.swift b/MeetingNotes/OSLog+extensions.swift deleted file mode 100644 index fe87ad38..00000000 --- a/MeetingNotes/OSLog+extensions.swift +++ /dev/null @@ -1,18 +0,0 @@ -import OSLog - -extension Logger { - /// Using your bundle identifier is a great way to ensure a unique identifier. - private static var subsystem = Bundle.main.bundleIdentifier! - - /// Logs the Document interactions, such as saving and loading. - static let document = Logger(subsystem: subsystem, category: "Document") - - /// Logs updates and interaction related to watching for external peer systems. - static let syncController = Logger(subsystem: subsystem, category: "SyncController") - - /// Logs updates and interaction related to the process of synchronization over the network. - static let syncConnection = Logger(subsystem: subsystem, category: "SyncConnection") - - /// Logs updates and interaction related to the process of synchronization over the network. 
- static let webSocket = Logger(subsystem: subsystem, category: "WebSocket") -} diff --git a/MeetingNotes/Sync/WebSocketNetworking/Task+timeout.swift b/MeetingNotes/Sync/WebSocketNetworking/Task+timeout.swift deleted file mode 100644 index 0025a548..00000000 --- a/MeetingNotes/Sync/WebSocketNetworking/Task+timeout.swift +++ /dev/null @@ -1,32 +0,0 @@ -// import Foundation -// -//// ref: https://gist.github.com/swhitty/9be89dfe97dbb55c6ef0f916273bbb97 -//// and https://forums.swift.org/t/running-an-async-task-with-a-timeout/49733 -// extension Task where Failure == Error { -// // Start a new Task with a timeout. If the timeout expires before the operation is -// // completed then the task is cancelled and an error is thrown. -// init( -// priority: TaskPriority? = nil, -// timeout: TimeInterval, -// operation: @escaping @Sendable () async throws -> Success -// ) { -// self = Task(priority: priority) { -// try await withThrowingTaskGroup(of: Success.self) { group -> Success in -// group.addTask(operation: operation) -// group.addTask { -// try await _Concurrency.Task.sleep(nanoseconds: UInt64(timeout * 1_000_000_000)) -// throw TimeoutError() -// } -// guard let success = try await group.next() else { -// throw _Concurrency.CancellationError() -// } -// group.cancelAll() -// return success -// } -// } -// } -// } -// -// private struct TimeoutError: LocalizedError { -// var errorDescription: String? = "Task timed out before completion" -// } diff --git a/MeetingNotes/Sync/WebSocketNetworking/WebSocketMessages.swift b/MeetingNotes/Sync/WebSocketNetworking/WebSocketMessages.swift deleted file mode 100644 index 5a60fc66..00000000 --- a/MeetingNotes/Sync/WebSocketNetworking/WebSocketMessages.swift +++ /dev/null @@ -1,744 +0,0 @@ -// -// WebSocketMessages.swift -// MeetingNotes -// -// Created by Joseph Heck on 1/24/24. 
-// - -import Foundation -import OSLog -import PotentCBOR - -// Automerge Repo WebSocket sync details: -// https://github.com/automerge/automerge-repo/blob/main/packages/automerge-repo-network-websocket/README.md -// explicitly using a protocol version "1" here - make sure to specify (and verify?) that - -// related source for the automerge-repo sync code: -// https://github.com/automerge/automerge-repo/blob/main/packages/automerge-repo-network-websocket/src/BrowserWebSocketClientAdapter.ts -// All the WebSocket messages are CBOR encoded and sent as data streams - -/// A type that encapsulates valid V1 protocol messages for the Automerge-repo sync protocol. -public indirect enum V1 { - // CDDL pre-amble - // ; The base64 encoded bytes of a Peer ID - // peer_id = str - // ; The base64 encoded bytes of a Storage ID - // storage_id = str - // ; The possible protocol versions (currently always the string "1") - // protocol_version = "1" - // ; The bytes of an automerge sync message - // sync_message = bstr - // ; The base58check encoded bytes of a document ID - // document_id = str - - /// A type that represents a peer - /// - /// Typically a UUID4 in string form. - public typealias PEER_ID = String - - /// A type that represents an identity for the storage of a peer. - /// - /// Typically a UUID4 in string form. Receiving peers may tie cached sync state for documents to this identifier. - public typealias STORAGE_ID = String - - /// A type that represents a document Id. - /// - /// Typically 16 bytes encoded in bs58 format. - public typealias DOCUMENT_ID = String - - /// A type that represents the raw bytes of an Automerge sync message. - public typealias SYNC_MESSAGE = Data - - static let encoder = CBOREncoder() - static let decoder = CBORDecoder() - - /// The collection of value "type" strings for the V1 automerge-repo protocol. 
- enum MsgTypes { - static var peer = "peer" - static var sync = "sync" - static var ephemeral = "ephemeral" - static var error = "error" - static var unavailable = "doc-unavailable" - static var join = "join" - static var remoteHeadsChanged = "remote-heads-changed" - static var request = "request" - static var remoteSubscriptionChange = "remote-subscription-change" - } - - case peer(PeerMsg) - case join(JoinMsg) - case error(ErrorMsg) - case request(RequestMsg) - case sync(SyncMsg) - case unavailable(UnavailableMsg) - // ephemeral - case ephemeral(EphemeralMsg) - // gossip additions - case remoteSubscriptionChange(RemoteSubscriptionChangeMsg) - case remoteHeadsChanged(RemoteHeadsChangedMsg) - // fall-through scenario - unknown message - case unknown(Data) - - /// Attempts to decode the data you provide as a peer message. - /// - /// - Parameter data: The data to decode - /// - Returns: The decoded message, or ``V1/unknown(_:)`` if the decoding attempt failed. - public static func decodePeer(_ data: Data) -> V1 { - if let peerMsg = attemptPeer(data) { - return .peer(peerMsg) - } else { - return .unknown(data) - } - } - - /// Exhaustively attempt to decode incoming data as V1 protocol messages. - /// - /// - Parameters: - /// - data: The data to decode. - /// - withGossip: A Boolean value that indicates whether to include decoding of handshake messages. - /// - withHandshake: A Boolean value that indicates whether to include decoding of gossip messages. - /// - Returns: The decoded message, or ``V1/unknown(_:)`` if the previous decoding attempts failed. - /// - /// The decoding is ordered from the perspective of an initiating client expecting a response to minimize attempts. - /// Enable `withGossip` to attempt to decode head gossip messages, and `withHandshake` to include handshake phase - /// messages. - /// With both `withGossip` and `withHandshake` set to `true`, the decoding is exhaustive over all V1 messages. 
- public static func decode(_ data: Data) -> V1 { - var cborMsg: CBOR? = nil - - // attempt to deserialize CBOR message (in order to read the type from it) - do { - cborMsg = try CBORSerialization.cbor(from: data) - } catch { - Logger.webSocket.warning("Unable to CBOR decode incoming data: \(data)") - return .unknown(data) - } - // read the "type" of the message in order to choose the appropriate decoding path - guard let msgType = cborMsg?.mapValue?["type"]?.utf8StringValue else { - return .unknown(data) - } - - switch msgType { - case MsgTypes.peer: - if let peerMsg = attemptPeer(data) { - return .peer(peerMsg) - } - case MsgTypes.sync: - if let syncMsg = attemptSync(data) { - return .sync(syncMsg) - } - case MsgTypes.ephemeral: - if let ephemeralMsg = attemptEphemeral(data) { - return .ephemeral(ephemeralMsg) - } - case MsgTypes.error: - if let errorMsg = attemptError(data) { - return .error(errorMsg) - } - case MsgTypes.unavailable: - if let unavailableMsg = attemptUnavailable(data) { - return .unavailable(unavailableMsg) - } - case MsgTypes.join: - if let joinMsg = attemptJoin(data) { - return .join(joinMsg) - } - case MsgTypes.remoteHeadsChanged: - if let remoteHeadsChanged = attemptRemoteHeadsChanged(data) { - return .remoteHeadsChanged(remoteHeadsChanged) - } - case MsgTypes.request: - if let requestMsg = attemptRequest(data) { - return .request(requestMsg) - } - case MsgTypes.remoteSubscriptionChange: - if let remoteSubChangeMsg = attemptRemoteSubscriptionChange(data) { - return .remoteSubscriptionChange(remoteSubChangeMsg) - } - - default: - return .unknown(data) - } - return .unknown(data) - } - - // sync phase messages - - static func attemptSync(_ data: Data) -> SyncMsg? { - do { - return try decoder.decode(SyncMsg.self, from: data) - } catch { - Logger.webSocket.warning("Failed to decode data as SyncMsg") - } - return nil - } - - static func attemptRequest(_ data: Data) -> RequestMsg? 
{ - do { - return try decoder.decode(RequestMsg.self, from: data) - } catch { - Logger.webSocket.warning("Failed to decode data as RequestMsg") - } - return nil - } - - static func attemptUnavailable(_ data: Data) -> UnavailableMsg? { - do { - return try decoder.decode(UnavailableMsg.self, from: data) - } catch { - Logger.webSocket.warning("Failed to decode data as UnavailableMsg") - } - return nil - } - - // handshake phase messages - - static func attemptPeer(_ data: Data) -> PeerMsg? { - do { - return try decoder.decode(PeerMsg.self, from: data) - } catch { - Logger.webSocket.warning("Failed to decode data as PeerMsg") - } - return nil - } - - static func attemptJoin(_ data: Data) -> JoinMsg? { - do { - return try decoder.decode(JoinMsg.self, from: data) - } catch { - Logger.webSocket.warning("Failed to decode data as JoinMsg") - } - return nil - } - - // error - - static func attemptError(_ data: Data) -> ErrorMsg? { - do { - return try decoder.decode(ErrorMsg.self, from: data) - } catch { - Logger.webSocket.warning("Failed to decode data as ErrorMsg") - } - return nil - } - - // ephemeral - - static func attemptEphemeral(_ data: Data) -> EphemeralMsg? { - do { - return try decoder.decode(EphemeralMsg.self, from: data) - } catch { - Logger.webSocket.warning("Failed to decode data as EphemeralMsg") - } - return nil - } - - // gossip - - static func attemptRemoteHeadsChanged(_ data: Data) -> RemoteHeadsChangedMsg? { - do { - return try decoder.decode(RemoteHeadsChangedMsg.self, from: data) - } catch { - Logger.webSocket.warning("Failed to decode data as RemoteHeadsChangedMsg") - } - return nil - } - - static func attemptRemoteSubscriptionChange(_ data: Data) -> RemoteSubscriptionChangeMsg? 
{ - do { - return try decoder.decode(RemoteSubscriptionChangeMsg.self, from: data) - } catch { - Logger.webSocket.warning("Failed to decode data as RemoteSubscriptionChangeMsg") - } - return nil - } - - // encode messages - - public static func encode(_ msg: JoinMsg) throws -> Data { - try encoder.encode(msg) - } - - public static func encode(_ msg: RequestMsg) throws -> Data { - try encoder.encode(msg) - } - - public static func encode(_ msg: SyncMsg) throws -> Data { - try encoder.encode(msg) - } - - public static func encode(_ msg: PeerMsg) throws -> Data { - try encoder.encode(msg) - } - - public static func encode(_ msg: UnavailableMsg) throws -> Data { - try encoder.encode(msg) - } - - public static func encode(_ msg: EphemeralMsg) throws -> Data { - try encoder.encode(msg) - } - - public static func encode(_ msg: RemoteSubscriptionChangeMsg) throws -> Data { - try encoder.encode(msg) - } - - public static func encode(_ msg: RemoteHeadsChangedMsg) throws -> Data { - try encoder.encode(msg) - } - - public static func encode(_ msg: ErrorMsg) throws -> Data { - try encoder.encode(msg) - } - - public static func encode(_ msg: V1) throws -> Data { - // not sure this is useful, but might as well finish out the set... 
- switch msg { - case let .peer(peerMsg): - return try encode(peerMsg) - case let .join(joinMsg): - return try encode(joinMsg) - case let .error(errorMsg): - return try encode(errorMsg) - case let .request(requestMsg): - return try encode(requestMsg) - case let .sync(syncMsg): - return try encode(syncMsg) - case let .unavailable(unavailableMsg): - return try encode(unavailableMsg) - case let .ephemeral(ephemeralMsg): - return try encode(ephemeralMsg) - case let .remoteSubscriptionChange(remoteSubscriptionChangeMsg): - return try encode(remoteSubscriptionChangeMsg) - case let .remoteHeadsChanged(remoteHeadsChangedMsg): - return try encode(remoteHeadsChangedMsg) - case let .unknown(data): - return data - } - } -} - -extension V1: CustomDebugStringConvertible { - public var debugDescription: String { - switch self { - case let .peer(interior_msg): - return interior_msg.debugDescription - case let .join(interior_msg): - return interior_msg.debugDescription - case let .error(interior_msg): - return interior_msg.debugDescription - case let .request(interior_msg): - return interior_msg.debugDescription - case let .sync(interior_msg): - return interior_msg.debugDescription - case let .unavailable(interior_msg): - return interior_msg.debugDescription - case let .ephemeral(interior_msg): - return interior_msg.debugDescription - case let .remoteSubscriptionChange(interior_msg): - return interior_msg.debugDescription - case let .remoteHeadsChanged(interior_msg): - return interior_msg.debugDescription - case let .unknown(data): - return "UNKNOWN[data: \(data.hexEncodedString(uppercase: false))]" - } - } -} - -public extension V1 { - // ; Metadata sent in either the join or peer message types - // peer_metadata = { - // ; The storage ID of this peer - // ? storageId: storage_id, - // ; Whether the sender expects to connect again with this storage ID - // isEphemeral: bool - // } - - struct PeerMetadata: Codable, CustomDebugStringConvertible { - public var storageId: STORAGE_ID? 
- public var isEphemeral: Bool - - public init(storageId: STORAGE_ID? = nil, isEphemeral: Bool) { - self.storageId = storageId - self.isEphemeral = isEphemeral - } - - public var debugDescription: String { - "[storageId: \(storageId ?? "nil"), ephemeral: \(isEphemeral)]" - } - } - - // - join - - // { - // type: "join", - // senderId: peer_id, - // supportedProtocolVersions: protocol_version - // ? metadata: peer_metadata, - // } - - // MARK: Join/Peer - - /// A message that indicates a desire to peer and sync documents. - /// - /// Sent by the initiating peer (represented by `senderId`) to initiate a connection to manage documents between - /// peers. - /// The next response is expected to be a ``PeerMsg``. If any other message is received after sending `JoinMsg`, the - /// initiating client should disconnect. - /// If the receiving peer receives any message other than a `JoinMsg` from the initiating peer, it is expected to - /// terminate the connection. - struct JoinMsg: Codable, CustomDebugStringConvertible { - public var type: String = V1.MsgTypes.join - public let senderId: PEER_ID - public var supportedProtocolVersions: String = "1" - public var peerMetadata: PeerMetadata? - - public init(senderId: PEER_ID, metadata: PeerMetadata? = nil) { - self.senderId = senderId - if let metadata { - self.peerMetadata = metadata - } - } - - public var debugDescription: String { - "JOIN[version: \(supportedProtocolVersions), sender: \(senderId), metadata: \(peerMetadata?.debugDescription ?? "nil")]" - } - } - - // - peer - (expected response to join) - // { - // type: "peer", - // senderId: peer_id, - // selectedProtocolVersion: protocol_version, - // targetId: peer_id, - // ? 
metadata: peer_metadata, - // } - - // example output from sync.automerge.org: - // { - // "type": "peer", - // "senderId": "storage-server-sync-automerge-org", - // "peerMetadata": {"storageId": "3760df37-a4c6-4f66-9ecd-732039a9385d", "isEphemeral": false}, - // "selectedProtocolVersion": "1", - // "targetId": "FA38A1B2-1433-49E7-8C3C-5F63C117DF09" - // } - - /// A message that acknowledges a join request. - /// - /// A response sent by a receiving peer (represented by `targetId`) after receiving a ``JoinMsg`` that indicates - /// sync, - /// gossiping, and ephemeral messages may now be initiated. - struct PeerMsg: Codable, CustomDebugStringConvertible { - public var type: String = V1.MsgTypes.peer - public let senderId: PEER_ID - public let targetId: PEER_ID - public var peerMetadata: PeerMetadata? - public var selectedProtocolVersion: String - - public init(senderId: PEER_ID, targetId: PEER_ID, storageId: String?, ephemeral: Bool = true) { - self.senderId = senderId - self.targetId = targetId - self.selectedProtocolVersion = "1" - self.peerMetadata = PeerMetadata(storageId: storageId, isEphemeral: ephemeral) - } - - public var debugDescription: String { - "PEER[version: \(selectedProtocolVersion), sender: \(senderId), target: \(targetId), metadata: \(peerMetadata?.debugDescription ?? 
"nil")]" - } - } - - // - error - - // { - // type: "error", - // message: str, - // } - - /// A sync error message - struct ErrorMsg: Codable, CustomDebugStringConvertible { - public var type: String = V1.MsgTypes.error - public let message: String - - public init(message: String) { - self.message = message - } - - public var debugDescription: String { - "ERROR[msg: \(message)" - } - } - - // MARK: Sync - - // - request - - // { - // type: "request", - // documentId: document_id, - // ; The peer requesting to begin sync - // senderId: peer_id, - // targetId: peer_id, - // ; The initial automerge sync message from the sender - // data: sync_message - // } - - /// A request to synchronize an Automerge document. - /// - /// Sent when the initiating peer (represented by `senderId`) is asking to begin sync for the given document ID. - /// Identical to ``SyncMsg`` but indicates to the receiving peer that the sender would like an ``UnavailableMsg`` - /// message if the receiving peer (represented by `targetId` does not have the document (identified by - /// `documentId`). 
- struct RequestMsg: Codable, CustomDebugStringConvertible { - public var type: String = V1.MsgTypes.request - public let documentId: DOCUMENT_ID - public let senderId: PEER_ID // The peer requesting to begin sync - public let targetId: PEER_ID - public let data: Data // The initial automerge sync message from the sender - - public init(documentId: DOCUMENT_ID, senderId: PEER_ID, targetId: PEER_ID, sync_message: Data) { - self.documentId = documentId - self.senderId = senderId - self.targetId = targetId - self.data = sync_message - } - - public var debugDescription: String { - "REQUEST[documentId: \(documentId), sender: \(senderId), target: \(targetId), data: \(data.count) bytes]" - } - } - - // - sync - - // { - // type: "sync", - // documentId: document_id, - // ; The peer requesting to begin sync - // senderId: peer_id, - // targetId: peer_id, - // ; The initial automerge sync message from the sender - // data: sync_message - // } - - /// A request to synchronize an Automerge document. - /// - /// Sent when the initiating peer (represented by `senderId`) is asking to begin sync for the given document ID. - /// Use `SyncMsg` instead of `RequestMsg` when you are creating a new Automerge document that you want to share. - /// - /// If the receiving peer doesn't have an Automerge document represented by `documentId` and can't or won't store - /// the - /// document. 
- struct SyncMsg: Codable, CustomDebugStringConvertible { - public var type = V1.MsgTypes.sync - public let documentId: DOCUMENT_ID - public let senderId: PEER_ID // The peer requesting to begin sync - public let targetId: PEER_ID - public let data: Data // The initial automerge sync message from the sender - - public init(documentId: DOCUMENT_ID, senderId: PEER_ID, targetId: PEER_ID, sync_message: Data) { - self.documentId = documentId - self.senderId = senderId - self.targetId = targetId - self.data = sync_message - } - - public var debugDescription: String { - "SYNC[documentId: \(documentId), sender: \(senderId), target: \(targetId), data: \(data.count) bytes]" - } - } - - // - unavailable - - // { - // type: "doc-unavailable", - // senderId: peer_id, - // targetId: peer_id, - // documentId: document_id, - // } - - /// A message that indicates a document is unavailable. - /// - /// Generally a response for a ``RequestMsg`` from an initiating peer (represented by `senderId`) that the receiving - /// peer (represented by `targetId`) doesn't have a copy of the requested Document, or is unable to share it. 
- struct UnavailableMsg: Codable, CustomDebugStringConvertible { - public var type = V1.MsgTypes.unavailable - public let documentId: DOCUMENT_ID - public let senderId: PEER_ID - public let targetId: PEER_ID - - public init(documentId: DOCUMENT_ID, senderId: PEER_ID, targetId: PEER_ID) { - self.documentId = documentId - self.senderId = senderId - self.targetId = targetId - } - - public var debugDescription: String { - "UNAVAILABLE[documentId: \(documentId), sender: \(senderId), target: \(targetId)]" - } - } - - // MARK: Ephemeral - - // - ephemeral - - // { - // type: "ephemeral", - // ; The peer who sent this message - // senderId: peer_id, - // ; The target of this message - // targetId: peer_id, - // ; The sequence number of this message within its session - // count: uint, - // ; The unique session identifying this stream of ephemeral messages - // sessionId: str, - // ; The document ID this ephemera relates to - // documentId: document_id, - // ; The data of this message (in practice this is arbitrary CBOR) - // data: bstr - // } - - struct EphemeralMsg: Codable, CustomDebugStringConvertible { - public var type = V1.MsgTypes.ephemeral - public let senderId: PEER_ID - public let targetId: PEER_ID - public let count: UInt - public let sessionId: String - public let documentId: DOCUMENT_ID - public let data: Data - - public init( - senderId: PEER_ID, - targetId: PEER_ID, - count: UInt, - sessionId: String, - documentId: DOCUMENT_ID, - data: Data - ) { - self.senderId = senderId - self.targetId = targetId - self.count = count - self.sessionId = sessionId - self.documentId = documentId - self.data = data - } - - public var debugDescription: String { - "EPHEMERAL[documentId: \(documentId), sender: \(senderId), target: \(targetId), count: \(count), sessionId: \(sessionId), data: \(data.count) bytes]" - } - } - - // MARK: Head's Gossiping - - // - remote subscription changed - - // { - // type: "remote-subscription-change" - // senderId: peer_id - // targetId: peer_id 
- // - // ; The storage IDs to add to the subscription - // ? add: [* storage_id] - // - // ; The storage IDs to remove from the subscription - // remove: [* storage_id] - // } - - struct RemoteSubscriptionChangeMsg: Codable, CustomDebugStringConvertible { - public var type = V1.MsgTypes.remoteSubscriptionChange - public let senderId: PEER_ID - public let targetId: PEER_ID - public var add: [STORAGE_ID]? - public var remove: [STORAGE_ID] - - public init(senderId: PEER_ID, targetId: PEER_ID, add: [STORAGE_ID]? = nil, remove: [STORAGE_ID]) { - self.senderId = senderId - self.targetId = targetId - self.add = add - self.remove = remove - } - - public var debugDescription: String { - var returnString = "REMOTE_SUBSCRIPTION_CHANGE[sender: \(senderId), target: \(targetId)]" - if let add { - returnString.append("\n add: [") - returnString.append(add.joined(separator: ",")) - returnString.append("]") - } - returnString.append("\n remove: [") - returnString.append(remove.joined(separator: ",")) - returnString.append("]") - return returnString - } - } - - // - remote heads changed - // { - // type: "remote-heads-changed" - // senderId: peer_id - // targetId: peer_id - // - // ; The document ID of the document that has changed - // documentId: document_id - // - // ; A map from storage ID to the heads advertised for a given storage ID - // newHeads: { - // * storage_id => { - // ; The heads of the new document for the given storage ID as - // ; a list of base64 encoded SHA2 hashes - // heads: [* string] - // ; The local time on the node which initially sent the remote-heads-changed - // ; message as milliseconds since the unix epoch - // timestamp: uint - // } - // } - // } - - struct RemoteHeadsChangedMsg: Codable, CustomDebugStringConvertible { - public struct HeadsAtTime: Codable, CustomDebugStringConvertible { - public var heads: [String] - public let timestamp: uint - - public init(heads: [String], timestamp: uint) { - self.heads = heads - self.timestamp = timestamp - } - 
- public var debugDescription: String { - "\(timestamp):[\(heads.joined(separator: ","))]" - } - } - - public var type = V1.MsgTypes.remoteHeadsChanged - public let senderId: PEER_ID - public let targetId: PEER_ID - public let documentId: DOCUMENT_ID - public var newHeads: [STORAGE_ID: HeadsAtTime] - public var add: [STORAGE_ID] - public var remove: [STORAGE_ID] - - public init( - senderId: PEER_ID, - targetId: PEER_ID, - documentId: DOCUMENT_ID, - newHeads: [STORAGE_ID: HeadsAtTime], - add: [STORAGE_ID], - remove: [STORAGE_ID] - ) { - self.senderId = senderId - self.targetId = targetId - self.documentId = documentId - self.newHeads = newHeads - self.add = add - self.remove = remove - } - - public var debugDescription: String { - var returnString = - "REMOTE_HEADS_CHANGED[documentId: \(documentId), sender: \(senderId), target: \(targetId)]" - returnString.append("\n heads:") - for (storage_id, headsAtTime) in newHeads { - returnString.append("\n \(storage_id) : \(headsAtTime.debugDescription)") - } - returnString.append("\n add: [") - returnString.append(add.joined(separator: ", ")) - returnString.append("]") - - returnString.append("\n remove: [") - returnString.append(remove.joined(separator: ", ")) - returnString.append("]") - return returnString - } - } -} diff --git a/MeetingNotes/UUID+bs58String.swift b/MeetingNotes/UUID+bs58String.swift deleted file mode 100644 index 9cc1d529..00000000 --- a/MeetingNotes/UUID+bs58String.swift +++ /dev/null @@ -1,50 +0,0 @@ -import Base58Swift -import Foundation - -public extension UUID { - var uintArray: [UInt8] { - var byteblob = [UInt8](repeating: 0, count: 16) - byteblob[0] = self.uuid.0 - byteblob[1] = self.uuid.1 - byteblob[2] = self.uuid.2 - byteblob[3] = self.uuid.3 - byteblob[4] = self.uuid.4 - byteblob[5] = self.uuid.5 - byteblob[6] = self.uuid.6 - byteblob[7] = self.uuid.7 - byteblob[8] = self.uuid.8 - byteblob[9] = self.uuid.9 - byteblob[10] = self.uuid.10 - byteblob[11] = self.uuid.11 - byteblob[12] = 
self.uuid.12 - byteblob[13] = self.uuid.13 - byteblob[14] = self.uuid.14 - byteblob[15] = self.uuid.15 - return byteblob - } - - var data: Data { - var byteblob = Data(count: 16) - byteblob[0] = self.uuid.0 - byteblob[1] = self.uuid.1 - byteblob[2] = self.uuid.2 - byteblob[3] = self.uuid.3 - byteblob[4] = self.uuid.4 - byteblob[5] = self.uuid.5 - byteblob[6] = self.uuid.6 - byteblob[7] = self.uuid.7 - byteblob[8] = self.uuid.8 - byteblob[9] = self.uuid.9 - byteblob[10] = self.uuid.10 - byteblob[11] = self.uuid.11 - byteblob[12] = self.uuid.12 - byteblob[13] = self.uuid.13 - byteblob[14] = self.uuid.14 - byteblob[15] = self.uuid.15 - return byteblob - } - - var bs58String: String { - Base58.base58CheckEncode(self.uintArray) - } -} diff --git a/MeetingNotes/Views/EditableAgendaItemView.swift b/MeetingNotes/Views/EditableAgendaItemView.swift index 9e9d81f1..e55d3725 100644 --- a/MeetingNotes/Views/EditableAgendaItemView.swift +++ b/MeetingNotes/Views/EditableAgendaItemView.swift @@ -3,6 +3,7 @@ import OSLog import SwiftUI /// A view that provides an editable view of an agenda item. +@MainActor struct EditableAgendaItemView: View { // Document is needed within this file to link to the undo manager. @ObservedObject var document: MeetingNotesDocument diff --git a/MeetingNotes/Views/ExportView.swift b/MeetingNotes/Views/ExportView.swift index 6b5a7b1e..09ab1325 100644 --- a/MeetingNotes/Views/ExportView.swift +++ b/MeetingNotes/Views/ExportView.swift @@ -1,9 +1,11 @@ +import AutomergeRepo import OSLog import SwiftUI import UniformTypeIdentifiers /// A toolbar button to coordinate merging documents. 
@available(macOS 14.0, iOS 17.0, *) +@MainActor struct ExportView: View { @ObservedObject var document: MeetingNotesDocument diff --git a/MeetingNotes/Views/MeetingNotesDocumentView.swift b/MeetingNotes/Views/MeetingNotesDocumentView.swift index 6fb89ec7..a568365a 100644 --- a/MeetingNotes/Views/MeetingNotesDocumentView.swift +++ b/MeetingNotes/Views/MeetingNotesDocumentView.swift @@ -1,7 +1,9 @@ +import AutomergeRepo import OSLog import SwiftUI /// The primary document view for a MeetingNotes document. +@MainActor struct MeetingNotesDocumentView: View { @ObservedObject var document: MeetingNotesDocument // The undo manager triggers serializations and saving changes to the model @@ -51,7 +53,7 @@ struct MeetingNotesDocumentView: View { .help("Exports the underlying Automerge document") .padding(.leading) } - PeerSyncView(documentId: document.id, syncController: sharedSyncCoordinator) + PeerSyncView(documentId: document.id) } } .navigationSplitViewColumnWidth(min: 250, ideal: 250) @@ -80,7 +82,7 @@ struct MeetingNotesDocumentView: View { // including sometimes regenerating them when disk contents are updated // in the background, so register the current instance with the // sync coordinator as they become visible. - sharedSyncCoordinator.registerDocument(document) + DocumentSyncCoordinator.shared.registerDocument(document: document.doc, id: document.id) } .onReceive(document.objectWillChange, perform: { _ in if !document.model.agendas.contains(where: { agendaItem in diff --git a/MeetingNotes/Views/MergeView.swift b/MeetingNotes/Views/MergeView.swift index 17dc5f8b..a31e63e6 100644 --- a/MeetingNotes/Views/MergeView.swift +++ b/MeetingNotes/Views/MergeView.swift @@ -2,6 +2,7 @@ import OSLog import SwiftUI /// A toolbar button to coordinate merging documents. 
+@MainActor struct MergeView: View { @ObservedObject var document: MeetingNotesDocument diff --git a/MeetingNotes/Views/NWBrowserResultItemView.swift b/MeetingNotes/Views/NWBrowserResultItemView.swift index d9f2ced7..facade55 100644 --- a/MeetingNotes/Views/NWBrowserResultItemView.swift +++ b/MeetingNotes/Views/NWBrowserResultItemView.swift @@ -1,7 +1,9 @@ +import AutomergeRepo import Network import SwiftUI /// A view that shows nearby peers available for sync. +@MainActor struct NWBrowserResultItemView: View { var documentId: DocumentId @ObservedObject var syncController: DocumentSyncCoordinator diff --git a/MeetingNotes/Views/PeerSyncView.swift b/MeetingNotes/Views/PeerSyncView.swift index 7353522a..c2234944 100644 --- a/MeetingNotes/Views/PeerSyncView.swift +++ b/MeetingNotes/Views/PeerSyncView.swift @@ -1,16 +1,18 @@ +import AutomergeRepo import Network import SwiftUI /// A view that shows the status of peers and network syncing. +@MainActor struct PeerSyncView: View { var documentId: DocumentId - @ObservedObject var syncController: DocumentSyncCoordinator + @ObservedObject var syncController: DocumentSyncCoordinator = .shared @State var browserActive: Bool = false @State var browserStyling: Color = .primary @State private var editNamePopoverShown: Bool = false - @AppStorage(MeetingNotesDefaultKeys.sharingIdentity) private var sharingIdentity: String = DocumentSyncCoordinator + @AppStorage(SynchronizerDefaultKeys.publicPeerName) private var sharingIdentity: String = DocumentSyncCoordinator .defaultSharingIdentity() var body: some View { diff --git a/MeetingNotes/Views/SyncConnectionView.swift b/MeetingNotes/Views/SyncConnectionView.swift index 8c2595ca..1d70fa96 100644 --- a/MeetingNotes/Views/SyncConnectionView.swift +++ b/MeetingNotes/Views/SyncConnectionView.swift @@ -1,9 +1,11 @@ +import AutomergeRepo import Network import SwiftUI /// A view that displays a sync connection and its state. 
+@MainActor struct SyncConnectionView: View { - @ObservedObject var syncConnection: SyncConnection + @ObservedObject var syncConnection: BonjourSyncConnection func stateRepresentationView() -> some View { switch syncConnection.connectionState { diff --git a/MeetingNotes/Views/SyncStatusView.swift b/MeetingNotes/Views/SyncStatusView.swift index 022eaa35..64474602 100644 --- a/MeetingNotes/Views/SyncStatusView.swift +++ b/MeetingNotes/Views/SyncStatusView.swift @@ -1,6 +1,8 @@ +import AutomergeRepo import SwiftUI /// A toolbar button for activating sync for a document. +@MainActor struct SyncStatusView: View { @State private var syncEnabledIndicator: Bool = false var body: some View { @@ -8,9 +10,9 @@ struct SyncStatusView: View { syncEnabledIndicator.toggle() if syncEnabledIndicator { // only enable listening if an identity has been chosen - sharedSyncCoordinator.activate() + DocumentSyncCoordinator.shared.activate() } else { - sharedSyncCoordinator.deactivate() + DocumentSyncCoordinator.shared.deactivate() } } label: { Image( diff --git a/MeetingNotes/Views/WebSocketStatusView.swift b/MeetingNotes/Views/WebSocketStatusView.swift index 2dbe5ce8..89297740 100644 --- a/MeetingNotes/Views/WebSocketStatusView.swift +++ b/MeetingNotes/Views/WebSocketStatusView.swift @@ -1,8 +1,10 @@ +import AutomergeRepo import SwiftUI /// A toolbar button for activating sync for a document. 
+@MainActor struct WebSocketStatusView: View { - enum SyncTargets: String, CaseIterable, Identifiable { + enum SyncTargets: String, Sendable, CaseIterable, Identifiable { case local case automerge // Identifiable conformance diff --git a/MeetingNotesTests/CBORDecodingTests.swift b/MeetingNotesTests/CBORDecodingTests.swift deleted file mode 100644 index e6704eb0..00000000 --- a/MeetingNotesTests/CBORDecodingTests.swift +++ /dev/null @@ -1,14 +0,0 @@ -import MeetingNotes -import PotentCBOR -import XCTest - -final class CBORDecodingTests: XCTestCase { - func testCBORSerialization() throws { - let peerMsg = V1.PeerMsg(senderId: "senderUUID", targetId: "targetUUID", storageId: "something") - let encodedPeerMsg = try V1.encode(peerMsg) - - let x = try CBORSerialization.cbor(from: encodedPeerMsg) - XCTAssertEqual(x.mapValue?["type"]?.utf8StringValue, V1.MsgTypes.peer) - // print("CBOR data: \(x)") - } -} diff --git a/MeetingNotesTests/MeetingNotesTests.swift b/MeetingNotesTests/MeetingNotesTests.swift index d548f48b..ad04d4d4 100644 --- a/MeetingNotesTests/MeetingNotesTests.swift +++ b/MeetingNotesTests/MeetingNotesTests.swift @@ -1,4 +1,3 @@ -import Base58Swift import MeetingNotes import XCTest @@ -10,40 +9,4 @@ final class MeetingNotesTests: XCTestCase { override func tearDownWithError() throws { // This method is called after the invocation of each test method in the class. 
} - - func testDataLengthUUIDandAutomergeID() throws { - let exampleUUID = UUID() - let bytes: Data = exampleUUID.data - // let full = "automerge:2j9knpCseyhnK8izDmLpGP5WMdZQ" - let partial = "2j9knpCseyhnK8izDmLpGP5WMdZQ" - if let decodedBytes = Base58.base58CheckDecode(partial) { - // both are 16 bytes of data - XCTAssertEqual(bytes.count, Data(decodedBytes).count) - } - } - - func testDisplayingUUIDWithBase58() throws { - let exampleUUID = try XCTUnwrap(UUID(uuidString: "1654A0B5-43B9-48FF-B7FB-83F58F4D1D75")) - // print("hexencoded: \(exampleUUID.data.hexEncodedString())") - XCTAssertEqual("1654a0b543b948ffb7fb83f58f4d1d75", exampleUUID.data.hexEncodedString()) - let bs58Converted = Base58.base58CheckEncode(exampleUUID.uintArray) - // print("Converted: \(bs58Converted)") - XCTAssertEqual("K3YptshN5CcFZNpnnXcStizSNPU", bs58Converted) - XCTAssertEqual(exampleUUID.bs58String, bs58Converted) - } - - func testDataInAndOutWithBase58() throws { - // let full = "automerge:2j9knpCseyhnK8izDmLpGP5WMdZQ" - let partial = "2j9knpCseyhnK8izDmLpGP5WMdZQ" - if let decodedBytes = Base58.base58CheckDecode(partial) { - print(decodedBytes.count) - // AutomergeID is 16 bytes of data - XCTAssertEqual(16, Data(decodedBytes).count) - XCTAssertEqual("7bf18580944c450ea740c1f23be047ca", Data(decodedBytes).hexEncodedString()) - // print(Data(decodedBytes).hexEncodedString()) - - let reversed = Base58.base58CheckEncode(decodedBytes) - XCTAssertEqual(reversed, partial) - } - } } diff --git a/MeetingNotesUITests/MeetingNotesUITests.swift b/MeetingNotesUITests/MeetingNotesUITests.swift index fa207d0b..e4bc0d3f 100644 --- a/MeetingNotesUITests/MeetingNotesUITests.swift +++ b/MeetingNotesUITests/MeetingNotesUITests.swift @@ -17,6 +17,7 @@ final class MeetingNotesUITests: XCTestCase { } @available(macOS 14.0, iOS 17.0, *) + @MainActor func testAutomatedAccessibility() { // https://holyswift.app/xcode-15-new-feature-streamlined-accessibility-audits/ let myApp = XCUIApplication() diff --git 
a/MeetingNotesUITests/MeetingNotesUITestsLaunchTests.swift b/MeetingNotesUITests/MeetingNotesUITestsLaunchTests.swift index 0b67de82..fe73cd04 100644 --- a/MeetingNotesUITests/MeetingNotesUITestsLaunchTests.swift +++ b/MeetingNotesUITests/MeetingNotesUITestsLaunchTests.swift @@ -9,6 +9,7 @@ final class MeetingNotesUITestsLaunchTests: XCTestCase { continueAfterFailure = false } + @MainActor func testLaunch() throws { let app = XCUIApplication() app.launch() diff --git a/Packages/automerge-repo/.gitignore b/Packages/automerge-repo/.gitignore new file mode 100644 index 00000000..81f2a9ff --- /dev/null +++ b/Packages/automerge-repo/.gitignore @@ -0,0 +1,9 @@ +.DS_Store +/.build +/Packages +.vscode/ +xcuserdata/ +DerivedData/ +.swiftpm/configuration/registries.json +.swiftpm/xcode/package.xcworkspace/contents.xcworkspacedata +.netrc diff --git a/Packages/automerge-repo/.swift-version b/Packages/automerge-repo/.swift-version new file mode 100644 index 00000000..95ee81a4 --- /dev/null +++ b/Packages/automerge-repo/.swift-version @@ -0,0 +1 @@ +5.9 diff --git a/Packages/automerge-repo/Package.swift b/Packages/automerge-repo/Package.swift new file mode 100644 index 00000000..9ce35539 --- /dev/null +++ b/Packages/automerge-repo/Package.swift @@ -0,0 +1,66 @@ +// swift-tools-version: 5.9 + +import Foundation +import PackageDescription + +var globalSwiftSettings: [PackageDescription.SwiftSetting] = [] + +if ProcessInfo.processInfo.environment["LOCAL_BUILD"] != nil { + globalSwiftSettings.append(.enableExperimentalFeature("StrictConcurrency")) +} + +let package = Package( + name: "automerge-repo", + platforms: [.iOS(.v16), .macOS(.v13)], + products: [ + .library( + name: "AutomergeRepo", + targets: ["AutomergeRepo"] + ), + ], + dependencies: [ + .package(url: "https://github.com/automerge/automerge-swift", .upToNextMajor(from: "0.5.7")), + .package(url: "https://github.com/outfoxx/PotentCodables", .upToNextMajor(from: "3.1.0")), + .package(url: 
"https://github.com/keefertaylor/Base58Swift", .upToNextMajor(from: "2.1.14")), + // Combine replacement for OSS + // .package(url: "https://github.com/apple/swift-async-algorithms", from: "1.0.0"), + // Distributed Tracing + .package(url: "https://github.com/apple/swift-distributed-tracing", from: "1.0.0"), + // Testing Tracing + .package(url: "https://github.com/heckj/DistributedTracer", branch: "main"), + // this ^^ brings in a MASSIVE cascade of dependencies + ], + targets: [ + .target( + name: "AutomergeRepo", + dependencies: [ + .product(name: "Automerge", package: "automerge-swift"), + // CBOR encoding and decoding + .product(name: "PotentCodables", package: "PotentCodables"), + // BS58 representations of data + .product(name: "Base58Swift", package: "Base58Swift"), + + // Combine replacement for OSS + // .product(name: "AsyncAlgorithms", package: "swift-async-algorithms"), + + // Distributed Tracing + .product(name: "Tracing", package: "swift-distributed-tracing"), + ], + // borrowing a set of Swift6 enabling features to double-check against + // future proofing concurrency, safety, and exportable feature-creep. 
+ swiftSettings: [ + .enableExperimentalFeature("StrictConcurrency"), + .enableUpcomingFeature("ExistentialAny"), + .enableExperimentalFeature("AccessLevelOnImport"), + .enableUpcomingFeature("InternalImportsByDefault"), + ] + ), + .testTarget( + name: "AutomergeRepoTests", + dependencies: [ + "AutomergeRepo", + .product(name: "DistributedTracer", package: "DistributedTracer"), + ] + ), + ] +) diff --git a/Packages/automerge-repo/Sources/AutomergeRepo/DocHandle.swift b/Packages/automerge-repo/Sources/AutomergeRepo/DocHandle.swift new file mode 100644 index 00000000..807cce96 --- /dev/null +++ b/Packages/automerge-repo/Sources/AutomergeRepo/DocHandle.swift @@ -0,0 +1,11 @@ +import class Automerge.Document + +public struct DocHandle: Sendable { + public let id: DocumentId + public let doc: Document + + init(id: DocumentId, doc: Document) { + self.id = id + self.doc = doc + } +} diff --git a/MeetingNotes/DocumentId.swift b/Packages/automerge-repo/Sources/AutomergeRepo/DocumentId.swift similarity index 92% rename from MeetingNotes/DocumentId.swift rename to Packages/automerge-repo/Sources/AutomergeRepo/DocumentId.swift index c6b35fbe..9a262b97 100644 --- a/MeetingNotes/DocumentId.swift +++ b/Packages/automerge-repo/Sources/AutomergeRepo/DocumentId.swift @@ -1,8 +1,9 @@ import Base58Swift -import Foundation +import struct Foundation.Data +import struct Foundation.UUID /// A type that represents an Automerge-repo compatible document identifier -public struct DocumentId: Hashable, Comparable, Identifiable { +public struct DocumentId: Sendable, Hashable, Comparable, Identifiable { /// A bs58 encoded string that represents the identifier public let id: String // Data? 
diff --git a/Packages/automerge-repo/Sources/AutomergeRepo/Documentation.docc/Documentation.md b/Packages/automerge-repo/Sources/AutomergeRepo/Documentation.docc/Documentation.md new file mode 100644 index 00000000..d76e6abe --- /dev/null +++ b/Packages/automerge-repo/Sources/AutomergeRepo/Documentation.docc/Documentation.md @@ -0,0 +1,31 @@ +# ``AutomergeRepo`` + +A summary of automerge-repo - the key of what you do with it + +## Overview + +introduction to automerge repo and what it provides, what problem it solves + +## Topics + +### Managing a collection of Automerge documents + +- ``AutomergeRepo/DocumentSyncCoordinator`` +- ``AutomergeRepo/DocumentId`` + +### Network Adapters + +- ``AutomergeRepo/NetworkSyncProvider`` +- ``AutomergeRepo/BonjourSyncConnection`` +- ``AutomergeRepo/WebsocketSyncConnection`` + +- ``AutomergeRepo/NetworkSubsystem`` +- ``AutomergeRepo/NetworkAdapterEvents`` +- ``AutomergeRepo/SyncUserDefaultsKeys`` +- ``AutomergeRepo/SynchronizerDefaultKeys +- ``AutomergeRepo/SyncV1`` + +### Storage Adapters + +- ``AutomergeRepo/StorageProvider`` +- ``AutomergeRepo/StorageSubsystem`` diff --git a/Packages/automerge-repo/Sources/AutomergeRepo/Documentation.docc/Resources/websocket_stragegy_request.svg b/Packages/automerge-repo/Sources/AutomergeRepo/Documentation.docc/Resources/websocket_stragegy_request.svg new file mode 100644 index 00000000..bcf085ce --- /dev/null +++ b/Packages/automerge-repo/Sources/AutomergeRepo/Documentation.docc/Resources/websocket_stragegy_request.svg @@ -0,0 +1 @@ +remotelocalremotelocalstate = "new" or "closed"state = "handshake"critical[handshaking phase]state = "peered"alt[: if unavailable]joinpeerrequestunavailablesync (if needed) \ No newline at end of file diff --git a/Packages/automerge-repo/Sources/AutomergeRepo/Documentation.docc/Resources/websocket_strategy_sync.svg b/Packages/automerge-repo/Sources/AutomergeRepo/Documentation.docc/Resources/websocket_strategy_sync.svg new file mode 100644 index 00000000..cc42cf3f 
--- /dev/null +++ b/Packages/automerge-repo/Sources/AutomergeRepo/Documentation.docc/Resources/websocket_strategy_sync.svg @@ -0,0 +1 @@ +remotelocalremotelocalstate = "new" or "closed"state = "handshake"critical[handshaking phase]state = "peered"joinpeersyncsync (if needed) \ No newline at end of file diff --git a/Packages/automerge-repo/Sources/AutomergeRepo/Documentation.docc/Resources/websocket_sync_states.svg b/Packages/automerge-repo/Sources/AutomergeRepo/Documentation.docc/Resources/websocket_sync_states.svg new file mode 100644 index 00000000..7f9e6c16 --- /dev/null +++ b/Packages/automerge-repo/Sources/AutomergeRepo/Documentation.docc/Resources/websocket_sync_states.svg @@ -0,0 +1 @@ +
WebsocketSyncConnection.init()
registerDocument()
await connect()
connect timeout expired
connection failed
websocket peer response
await disconnect()
websocket error
await connect()
new
handshake
closed
peered
WebSocket Sync Protocol
\ No newline at end of file diff --git a/Packages/automerge-repo/Sources/AutomergeRepo/Documentation.docc/Resources/wss_closed.svg b/Packages/automerge-repo/Sources/AutomergeRepo/Documentation.docc/Resources/wss_closed.svg new file mode 100644 index 00000000..416db124 --- /dev/null +++ b/Packages/automerge-repo/Sources/AutomergeRepo/Documentation.docc/Resources/wss_closed.svg @@ -0,0 +1 @@ +
new
handshake
closed
peered
\ No newline at end of file diff --git a/Packages/automerge-repo/Sources/AutomergeRepo/Documentation.docc/Resources/wss_handshake.svg b/Packages/automerge-repo/Sources/AutomergeRepo/Documentation.docc/Resources/wss_handshake.svg new file mode 100644 index 00000000..f1745df1 --- /dev/null +++ b/Packages/automerge-repo/Sources/AutomergeRepo/Documentation.docc/Resources/wss_handshake.svg @@ -0,0 +1 @@ +
new
handshake
closed
peered
\ No newline at end of file diff --git a/Packages/automerge-repo/Sources/AutomergeRepo/Documentation.docc/Resources/wss_initial.svg b/Packages/automerge-repo/Sources/AutomergeRepo/Documentation.docc/Resources/wss_initial.svg new file mode 100644 index 00000000..2a15058d --- /dev/null +++ b/Packages/automerge-repo/Sources/AutomergeRepo/Documentation.docc/Resources/wss_initial.svg @@ -0,0 +1 @@ +
new
handshake
closed
peered
\ No newline at end of file diff --git a/Packages/automerge-repo/Sources/AutomergeRepo/Documentation.docc/Resources/wss_peered.svg b/Packages/automerge-repo/Sources/AutomergeRepo/Documentation.docc/Resources/wss_peered.svg new file mode 100644 index 00000000..034ba72b --- /dev/null +++ b/Packages/automerge-repo/Sources/AutomergeRepo/Documentation.docc/Resources/wss_peered.svg @@ -0,0 +1 @@ +
new
handshake
closed
peered
\ No newline at end of file diff --git a/Packages/automerge-repo/Sources/AutomergeRepo/InternalDocHandle.swift b/Packages/automerge-repo/Sources/AutomergeRepo/InternalDocHandle.swift new file mode 100644 index 00000000..29153bdc --- /dev/null +++ b/Packages/automerge-repo/Sources/AutomergeRepo/InternalDocHandle.swift @@ -0,0 +1,92 @@ +import struct Automerge.ChangeHash +import class Automerge.Document +import struct Automerge.SyncState +import struct Foundation.Data + +final class InternalDocHandle { + enum DocHandleState { + case idle + case loading + case requesting + case ready + case unavailable + case deleted + } + + // NOTE: heckj - what I was originally researching how this all goes together, I + // wondered if there wasn't the concept of unloading/reloading the bytes from memory and + // onto disk when there was a storage system available - in that case, we'd need a few + // more states to this diagram (originally from `automerge-repo`) - one for 'purged' and + // an associated action PURGE - the idea being that might be invoked when an app is coming + // under memory pressure. + // + // The state itself is driven from Repo, in the `resolveDocHandle(id:)` method + + /** + * Internally we use a state machine to orchestrate document loading and/or syncing, in order to + * avoid requesting data we already have, or surfacing intermediate values to the consumer. + * + * ┌─────────────────────┬─────────TIMEOUT────►┌─────────────┐ + * ┌───┴─────┐ ┌───┴────────┐ │ unavailable │ + * ┌───────┐ ┌──FIND──┤ loading ├─REQUEST──►│ requesting ├─UPDATE──┐ └─────────────┘ + * │ idle ├──┤ └───┬─────┘ └────────────┘ │ + * └───────┘ │ │ └─►┌────────┐ + * │ └───────LOAD───────────────────────────────►│ ready │ + * └──CREATE───────────────────────────────────────────────►└────────┘ + */ + + let id: DocumentId + var doc: Automerge.Document? 
+ var state: DocHandleState + var remoteHeads: [STORAGE_ID: Set] + var syncStates: [PEER_ID: SyncState] + + // TODO: verify that we want a timeout delay per Document, as opposed to per-Repo + var timeoutDelay: Double + + init(id: DocumentId, isNew: Bool, initialValue: Automerge.Document? = nil, timeoutDelay: Double = 1.0) { + self.id = id + self.timeoutDelay = timeoutDelay + remoteHeads = [:] + syncStates = [:] + // isNew is when we're creating content and it needs to get stored locally in a storage + // provider, if available. + if isNew { + if let newDoc = initialValue { + self.doc = newDoc + self.state = .loading + } else { + self.doc = nil + self.state = .idle + } + } else if let newDoc = initialValue { + self.doc = newDoc + self.state = .ready + } else { + self.doc = nil + self.state = .idle + } + } + + var isReady: Bool { + self.state == .ready + } + + var isDeleted: Bool { + self.state == .deleted + } + + var isUnavailable: Bool { + self.state == .unavailable + } + + // not entirely sure why this is holding data about remote heads... convenience? + // why not track within Repo? + func getRemoteHeads(id: STORAGE_ID) async -> Set? { + remoteHeads[id] + } + + func setRemoteHeads(id: STORAGE_ID, heads: Set) { + remoteHeads[id] = heads + } +} diff --git a/Packages/automerge-repo/Sources/AutomergeRepo/Networking/NetworkAdapterEvents.swift b/Packages/automerge-repo/Sources/AutomergeRepo/Networking/NetworkAdapterEvents.swift new file mode 100644 index 00000000..b773e112 --- /dev/null +++ b/Packages/automerge-repo/Sources/AutomergeRepo/Networking/NetworkAdapterEvents.swift @@ -0,0 +1,73 @@ +public struct PeerConnection: Sendable, CustomStringConvertible { + public var description: String { + if let meta = self.peerMetadata { + "\(peerId),\(meta)" + } else { + "\(peerId),nil" + } + } + + public let peerId: PEER_ID + public let peerMetadata: PeerMetadata? + + public init(peerId: PEER_ID, peerMetadata: PeerMetadata?) 
{ + self.peerId = peerId + self.peerMetadata = peerMetadata + } +} + +public enum NetworkAdapterEvents: Sendable, CustomDebugStringConvertible { + public var debugDescription: String { + switch self { + case let .ready(payload): + "NetworkAdapterEvents.ready[\(payload)]" + case .close: + "NetworkAdapterEvents.close[]" + case let .peerCandidate(payload): + "NetworkAdapterEvents.peerCandidate[\(payload)]" + case let .peerDisconnect(payload): + "NetworkAdapterEvents.peerDisconnect[\(payload)]" + case let .message(payload): + "NetworkAdapterEvents.message[\(payload)]" + } + } + + public struct PeerDisconnectPayload: Sendable, CustomStringConvertible { + public var description: String { + "\(peerId)" + } + + // handled by Repo, relevant to Sync + let peerId: PEER_ID + + public init(peerId: PEER_ID) { + self.peerId = peerId + } + } + + case ready(payload: PeerConnection) // a network connection has been established and peered - sent by both listening + // and initiating connections + case close // handled by Repo, relevant to sync + case peerCandidate(payload: PeerConnection) // sent when a listening network adapter receives a proposed connection + // message (aka 'join') + case peerDisconnect(payload: PeerDisconnectPayload) // send when a peer connection terminates + case message(payload: SyncV1Msg) // handled by Sync +} + +// network connection overview: +// - connection established +// - initiating side sends "join" message +// - receiving side send "peer" message +// ONLY after peer message is received is the connection considered valid + +// for an outgoing connection: +// - network is ready for action +// - connect(to: SOMETHING) +// - when it receives the "peer" message, it's ready for ongoing work + +// for an incoming connection: +// - network is ready for action +// - remove peer opens a connection, we receive a "join" message +// - (peer candidate is known at that point) +// - if all is good (version matches, etc) then we send "peer" message to acknowledge +// 
- after that, we're ready to process protocol messages diff --git a/Packages/automerge-repo/Sources/AutomergeRepo/Networking/NetworkProvider.swift b/Packages/automerge-repo/Sources/AutomergeRepo/Networking/NetworkProvider.swift new file mode 100644 index 00000000..db8b1364 --- /dev/null +++ b/Packages/automerge-repo/Sources/AutomergeRepo/Networking/NetworkProvider.swift @@ -0,0 +1,76 @@ +// import AsyncAlgorithms + +// import protocol Combine.Publisher +import Automerge + +// https://github.com/automerge/automerge-repo/blob/main/packages/automerge-repo/src/network/NetworkAdapterInterface.ts + +/// A type that is responsible for establishing, and maintaining, a network connection for Automerge +/// +/// Types conforming to this protocol are responsible for the setup and initial handshake with other +/// peers, and flow through messages to component that owns the reference to the network adapter. +/// A higher level object is responsible for responding to sync, gossip, and other messages appropriately. +/// +/// A NetworkProvider instance can be either initiating or listening for - and responding to - a connection. +/// +/// The expected behavior when a network provide initiates a connection: +/// +/// - After the underlying transport connection is established due to a call to `connect`, the provider emits +/// ``NetworkAdapterEvents/ready(payload:)``, which includes a payload that indicates a +/// reference to the network provider (`any NetworkAdapter`). +/// - After the connection is established, the adapter sends a ``SyncV1/join(_:)`` message to request peering. +/// - When the NetworkAdapter receives a ``SyncV1/peer(_:)`` message, it emits +/// ``NetworkAdapterEvents/peerCandidate(payload:)``. +/// - If a message other than `peer` is received, the adapter should terminate the connection and emit +/// ``NetworkAdapterEvents/close``. +/// - All other messages are emitted as ``NetworkAdapterEvents/message(payload:)``. 
+/// - When a transport connection is closed, the adapter should emit ``NetworkAdapterEvents/peerDisconnect(payload:)``. +/// - When `disconnect` is invoked on a network provider, it should send a ``SyncV1/leave(_:)`` message, terminate the +/// connection, and emit ``NetworkAdapterEvents/close``. +/// +/// A connecting transport may optionally enable automatic reconnection on connection failure. Any configurable +/// reconnection logic exists, +/// it should be configured with a `configure` call with the relevant configuration type for the network provider. +/// +/// The expected behavior when listening for, and responding to, an incoming connection: +/// - When a connection is established, emit ``NetworkAdapterEvents/ready(payload:)``. +/// - When the transport receives a `join` message, verify that the protocols being requested are compatible. If they +/// are not, +/// return an ``SyncV1/error(_:)`` message, close the connection, and emit ``NetworkAdapterEvents/close``. +/// - When any other message is received, it is emitted with ``NetworkAdapterEvents/message(payload:)``. +/// - When the transport receives a `leave` message, close the connection and emit ``NetworkAdapterEvents/close``. +public protocol NetworkProvider: Sendable { + /// A list of all active, peered connections that the provider is maintaining. + /// + /// For an outgoing connection, this is typically a single connection. + /// For a listening connection, this could be quite a few. + var peeredConnections: [PeerConnection] { get async } + + /// For outgoing connections, the type that represents the endpoint to connect + /// For example, it could be `URL`, `NWEndpoint` for a Bonjour network, or a custom type. + associatedtype NetworkConnectionEndpoint: Sendable + + /// Initiate an outgoing connection. + func connect(to: NetworkConnectionEndpoint) async throws // aka "activate" + + /// Disconnect and terminate any existing connection. 
+ func disconnect() async // aka "deactivate" + + /// Requests the network transport to send a message. + /// - Parameter message: The message to send. + /// - Parameter to: An option peerId to identify the recipient for the message. If nil, the message is sent to all + /// connected peers. + func send(message: SyncV1Msg, to: PEER_ID?) async + + /// Sets the delegate and configures the peer information for a Network Provider + /// - Parameter to: The instance that accepts asynchronous network events from the provider. + /// - Parameter peer: The peer ID for the network provider to use. + func setDelegate(_ delegate: any NetworkEventReceiver, as peer: PEER_ID, with metadata: PeerMetadata?) async +} + +/// A type that accepts provides a method for a Network Provider to call with network events. +public protocol NetworkEventReceiver: Sendable { + /// Receive and process an event from a Network Provider. + /// - Parameter event: The event to process. + func receiveEvent(event: NetworkAdapterEvents) async +} diff --git a/Packages/automerge-repo/Sources/AutomergeRepo/Networking/NetworkSubsystem.swift b/Packages/automerge-repo/Sources/AutomergeRepo/Networking/NetworkSubsystem.swift new file mode 100644 index 00000000..498ab848 --- /dev/null +++ b/Packages/automerge-repo/Sources/AutomergeRepo/Networking/NetworkSubsystem.swift @@ -0,0 +1,194 @@ +// import AsyncAlgorithms +import Automerge +import struct Foundation.Data +import OSLog +import PotentCBOR + +// riff +// https://github.com/automerge/automerge-repo/blob/main/packages/automerge-repo/src/network/NetworkSubsystem.ts + +/// A type that hosts network subsystems to connect to peers. +/// +/// The NetworkSubsystem instance is responsible for setting up and configuring any network providers, and responding to +/// messages from remote peers after the connection has been established. The connection handshake and peer negotiation +/// is +/// the responsibility of the network provider instance. 
+public actor NetworkSubsystem { + // a list of documents with a pending request for a documentId + var requestedDocuments: [DocumentId: [PEER_ID]] = [:] + + public static let encoder = CBOREncoder() + public static let decoder = CBORDecoder() + + // repo is a weak var to avoid a retain cycle - a network subsystem is + // (so far) always created with a Repo and uses it for remote data storage of documents that + // it fetches, syncs, or gossips about. + // + // TODO: revisit this and consider if the callbacks to repo should be exposed as a delegate + weak var repo: Repo? + var adapters: [any NetworkProvider] + + init() { + self.adapters = [] + } + + func setRepo(_ repo: Repo) async { + self.repo = repo + } + + func addAdapter(adapter: some NetworkProvider) async { + guard let peerId = repo?.peerId else { + fatalError("NO REPO CONFIGURED WHEN ADDING ADAPTERS") + } + await adapter.setDelegate(self, as: peerId, with: repo?.localPeerMetadata) + self.adapters.append(adapter) + } + + func startRemoteFetch(id: DocumentId) async throws { + // attempt to fetch the provided document Id from all (current) peers, returning the document + // or returning nil if the document is unavailable. + // Save the throwing scenarios for failures in connection, etc. 
+ guard let repo else { + // invariant that there should be a valid doc handle available from the repo + throw Errors.Unavailable(id: id) + } + + let newDocument = Document() + for adapter in adapters { + for peerConnection in await adapter.peeredConnections { + // upsert the requested document into the list by peer + if var existingList = requestedDocuments[id] { + existingList.append(peerConnection.peerId) + requestedDocuments[id] = existingList + } else { + requestedDocuments[id] = [peerConnection.peerId] + } + // get a current sync state (creating one if needed for a fresh sync) + let syncState = await repo.syncState(id: id, peer: peerConnection.peerId) + + if let syncRequestData = newDocument.generateSyncMessage(state: syncState) { + await adapter.send(message: .request(SyncV1Msg.RequestMsg( + documentId: id.description, + senderId: repo.peerId, + targetId: peerConnection.peerId, + sync_message: syncRequestData + )), to: peerConnection.peerId) + } + } + } + } + + func send(message: SyncV1Msg, to: PEER_ID?) async { + for adapter in adapters { + await adapter.send(message: message, to: to) + } + } +} + +extension NetworkSubsystem: NetworkEventReceiver { + // Collection point for messages coming in from all network adapters. + // The network subsystem forwards messages from network peers to the relevant places, + // and forwards messages out to peers as needed + // + // In automerge-repo code, it appears to update information on an ephemeral information ( + // a sort of middleware) before emitting it upwards. 
+ public func receiveEvent(event: NetworkAdapterEvents) async { + // Logger.network.trace("received event from network adapter: \(event.debugDescription)") + guard let repo else { + // No-op if there's no repo to update state or handle + // further message passing + return + } + switch event { + case let .ready(payload): + await repo.addPeerWithMetadata(peer: payload.peerId, metadata: payload.peerMetadata) + case .close: + break + // attempt to reconnect, or remove from active adapters? + case let .peerCandidate(payload): + await repo.addPeerWithMetadata(peer: payload.peerId, metadata: payload.peerMetadata) + case let .peerDisconnect(payload): + await repo.removePeer(peer: payload.peerId) + case let .message(payload): + switch payload { + case .peer, .join, .leave, .unknown: + // ERROR FOR THESE MSG TYPES - expected to be handled at adapter + Logger.network + .error( + "Unexpected message type received by network subsystem: \(payload.debugDescription, privacy: .public)" + ) + #if DEBUG + fatalError("UNEXPECTED MSG") + #endif + case let .error(errorMsg): + Logger.network + .warning( + "Error message received by network subsystem: \(errorMsg.debugDescription, privacy: .public)" + ) + case let .request(requestMsg): + await repo.handleRequest(msg: requestMsg) + case let .sync(syncMsg): + await repo.handleSync(msg: syncMsg) + case let .unavailable(unavailableMsg): + guard let docId = DocumentId(unavailableMsg.documentId) else { + Logger.network + .error( + "Invalid message Id \(unavailableMsg.documentId, privacy: .public) in unavailable msg: \(unavailableMsg.debugDescription, privacy: .public)" + ) + return + } + if let peersRequested = requestedDocuments[docId] { + // if we receive an unavailable from one peer, record it and wait until + // we receive unavailable from all available peers before marking it unavailable + let remainingPeersPending = peersRequested.filter { peerId in + // include the peers OTHER than the one sending the unavailable msg + peerId != 
unavailableMsg.senderId + } + if remainingPeersPending.isEmpty { + await repo.markDocUnavailable(id: docId) + requestedDocuments.removeValue(forKey: docId) + } else { + // handle the scenario where we started with more adapters but + // lost a connection... + + var currentConnectedPeers: [PEER_ID] = [] + for adapter in self.adapters { + let connectedPeers: [PEER_ID] = await adapter.peeredConnections + .map { peerConnection in + peerConnection.peerId + } + currentConnectedPeers.append(contentsOf: connectedPeers) + } + let stillPending = remainingPeersPending.compactMap { peerId in + if currentConnectedPeers.contains(peerId) { + peerId + } else { + nil + } + } + // save the data back for other adapters to respond later... + requestedDocuments[docId] = stillPending + } + } else { + // no peers are waiting to hear about a requested document, ignore + return + } + case let .ephemeral(ephemeralMsg): + Logger.network + .error( + "UNIMPLEMENTED EPHEMERAL MESSAGE PASSING: \(ephemeralMsg.debugDescription, privacy: .public)" + ) + case let .remoteSubscriptionChange(remoteSubscriptionChangeMsg): + Logger.network + .error( + "UNIMPLEMENTED EPHEMERAL MESSAGE PASSING: \(remoteSubscriptionChangeMsg.debugDescription, privacy: .public)" + ) + case let .remoteHeadsChanged(remoteHeadsChangedMsg): + Logger.network + .error( + "UNIMPLEMENTED EPHEMERAL MESSAGE PASSING: \(remoteHeadsChangedMsg.debugDescription, privacy: .public)" + ) + } + } + } +} diff --git a/MeetingNotes/Sync/PeerNetworking/SyncConnection.swift b/Packages/automerge-repo/Sources/AutomergeRepo/Networking/PeerNetworking/BonjourSyncConnection.swift similarity index 88% rename from MeetingNotes/Sync/PeerNetworking/SyncConnection.swift rename to Packages/automerge-repo/Sources/AutomergeRepo/Networking/PeerNetworking/BonjourSyncConnection.swift index 9d29985e..0c4edc60 100644 --- a/MeetingNotes/Sync/PeerNetworking/SyncConnection.swift +++ 
b/Packages/automerge-repo/Sources/AutomergeRepo/Networking/PeerNetworking/BonjourSyncConnection.swift @@ -23,10 +23,11 @@ import OSLog /// As soon as it is established, it attempts to commence a sync operation (send and expect to receive sync messages). /// In addition, it includes an optional `trigger` in its initializer that, when it receives any signal value, kicks off /// another attempt to sync the relevant Automerge document. -final class SyncConnection: ObservableObject { +@MainActor +public final class BonjourSyncConnection: ObservableObject { /// A unique identifier to track the connections for comparison against existing connections. var connectionId = UUID() - var shortId: String { + public var shortId: String { // "41ee739d-c827-4be8-9a4f-c44a492e76cf" String(connectionId.uuidString.lowercased().suffix(8)) } @@ -37,16 +38,16 @@ final class SyncConnection: ObservableObject { var connection: NWConnection? /// A Boolean value that indicates this app initiated this connection. - @Published var connectionState: NWConnection.State = .setup - @Published var endpoint: NWEndpoint? + @Published public var connectionState: NWConnection.State = .setup + @Published public var endpoint: NWEndpoint? /// The peer Id for the connection endpoint, only set on outbound connections. var peerId: String? - /// The synchronisation state associated with this connection. + /// The synchronization state associated with this connection. var syncState: SyncState /// The cancellable subscription to the trigger mechanism that attempts sync updates. - var syncTriggerCancellable: Cancellable? + var syncTriggerCancellable: (any Cancellable)? /// Initiate a connection to a network endpoint to synchronise an Automerge Document. /// - Parameters: @@ -95,8 +96,8 @@ final class SyncConnection: ObservableObject { } /// Cancels the current connection. 
- func cancel() { - if let connection = connection { + public func cancel() { + if let connection { syncTriggerCancellable?.cancel() if let peerId { Logger.syncConnection @@ -117,12 +118,12 @@ final class SyncConnection: ObservableObject { // Handle starting the peer-to-peer connection for both inbound and outbound connections. private func startConnection(_ trigger: AnyPublisher) { - guard let connection = connection else { + guard let connection else { return } - syncTriggerCancellable = trigger.sink(receiveValue: { _ in - if let automergeDoc = sharedSyncCoordinator.documents[self.documentId]?.value?.doc, + syncTriggerCancellable = trigger.sink(receiveValue: { @MainActor _ in + if let automergeDoc = DocumentSyncCoordinator.shared.documents[self.documentId]?.value, let syncData = automergeDoc.generateSyncMessage(state: self.syncState), self.connectionState == .ready { @@ -134,7 +135,7 @@ final class SyncConnection: ObservableObject { } }) - connection.stateUpdateHandler = { [weak self] newState in + connection.stateUpdateHandler = { @MainActor [weak self] newState in guard let self else { return } self.connectionState = newState @@ -160,7 +161,7 @@ final class SyncConnection: ObservableObject { // Cancel the connection upon a failure. connection.cancel() self.syncTriggerCancellable?.cancel() - sharedSyncCoordinator.removeConnection(self.connectionId) + DocumentSyncCoordinator.shared.removeConnection(self.connectionId) self.syncTriggerCancellable = nil case .cancelled: @@ -169,7 +170,7 @@ final class SyncConnection: ObservableObject { "\(self.shortId, privacy: .public): CANCEL \(endpoint.debugDescription, privacy: .public) connection." 
) self.syncTriggerCancellable?.cancel() - sharedSyncCoordinator.removeConnection(self.connectionId) + DocumentSyncCoordinator.shared.removeConnection(self.connectionId) self.syncTriggerCancellable = nil case let .waiting(nWError): @@ -221,7 +222,7 @@ final class SyncConnection: ObservableObject { /// Receive a message from the sync protocol framing, deliver it to the delegate for processing, and continue /// receiving messages. private func receiveNextMessage() { - guard let connection = connection else { + guard let connection else { return } @@ -258,7 +259,7 @@ final class SyncConnection: ObservableObject { /// - Parameter documentId: The document Id to send. func sendDocumentId(_ documentId: DocumentId) { // corresponds to SyncMessageType.id - guard let connection = connection else { + guard let connection else { return } @@ -281,7 +282,7 @@ final class SyncConnection: ObservableObject { /// Sends an Automerge sync data packet. /// - Parameter syncMsg: The data to send. func sendSyncMsg(_ syncMsg: Data) { - guard let connection = connection else { + guard let connection else { Logger.syncConnection .error("\(self.shortId, privacy: .public): PeerConnection doesn't have an active connection!") return @@ -303,8 +304,8 @@ final class SyncConnection: ObservableObject { ) } - func receivedMessage(content data: Data?, message: NWProtocolFramer.Message, from endpoint: NWEndpoint) { - guard let document = sharedSyncCoordinator.documents[self.documentId]?.value else { + @MainActor func receivedMessage(content data: Data?, message: NWProtocolFramer.Message, from endpoint: NWEndpoint) { + guard let document = DocumentSyncCoordinator.shared.documents[self.documentId]?.value else { Logger.syncConnection .warning( "\(self.shortId, privacy: .public): received msg for unregistered document \(self.documentId, privacy: .public) from \(endpoint.debugDescription, privacy: .public)" @@ -313,7 +314,7 @@ final class SyncConnection: ObservableObject { return } switch 
message.syncMessageType { - case .invalid: + case .unknown: Logger.syncConnection .error( "\(self.shortId, privacy: .public): Invalid message received from \(endpoint.debugDescription, privacy: .public)" @@ -329,7 +330,7 @@ final class SyncConnection: ObservableObject { do { // When we receive a complete sync message from the underlying transport, // update our automerge document, and the associated SyncState. - let patches = try document.doc.receiveSyncMessageWithPatches( + let patches = try document.receiveSyncMessageWithPatches( state: syncState, message: data ) @@ -337,11 +338,10 @@ final class SyncConnection: ObservableObject { .debug( "\(self.shortId, privacy: .public): Received \(patches.count, privacy: .public) patches in \(data.count, privacy: .public) bytes" ) - try document.getModelUpdates() // Once the Automerge doc is updated, check (using the SyncState) to see if // we believe we need to send additional messages to the peer to keep it in sync. - if let response = document.doc.generateSyncMessage(state: syncState) { + if let response = document.generateSyncMessage(state: syncState) { sendSyncMsg(response) } else { // When generateSyncMessage returns nil, the remote endpoint represented by @@ -357,9 +357,27 @@ final class SyncConnection: ObservableObject { } case .id: Logger.syncConnection.info("\(self.shortId, privacy: .public): received request for document ID") - sendDocumentId(document.id) + sendDocumentId(self.documentId) + case .peer: + break + case .leave: + break + case .join: + break + case .request: + break + case .unavailable: + break + case .ephemeral: + break + case .syncerror: + break + case .remoteHeadsChanged: + break + case .remoteSubscriptionChange: + break } } } -extension SyncConnection: Identifiable {} +extension BonjourSyncConnection: Identifiable {} diff --git a/MeetingNotes/Sync/PeerNetworking/NWParameters+peerSyncParameters.swift 
b/Packages/automerge-repo/Sources/AutomergeRepo/Networking/PeerNetworking/NWParameters+peerSyncParameters.swift similarity index 100% rename from MeetingNotes/Sync/PeerNetworking/NWParameters+peerSyncParameters.swift rename to Packages/automerge-repo/Sources/AutomergeRepo/Networking/PeerNetworking/NWParameters+peerSyncParameters.swift diff --git a/MeetingNotes/Sync/PeerNetworking/P2PAutomergeSyncProtocol.swift b/Packages/automerge-repo/Sources/AutomergeRepo/Networking/PeerNetworking/P2PAutomergeSyncProtocol.swift similarity index 94% rename from MeetingNotes/Sync/PeerNetworking/P2PAutomergeSyncProtocol.swift rename to Packages/automerge-repo/Sources/AutomergeRepo/Networking/PeerNetworking/P2PAutomergeSyncProtocol.swift index 318bc081..b95a83ab 100644 --- a/MeetingNotes/Sync/PeerNetworking/P2PAutomergeSyncProtocol.swift +++ b/Packages/automerge-repo/Sources/AutomergeRepo/Networking/PeerNetworking/P2PAutomergeSyncProtocol.swift @@ -19,12 +19,18 @@ import OSLog /// The type of sync message for the Automerge network sync protocol. enum P2PSyncMessageType: UInt32 { - // TODO(heckj): is there benefit to dropping this down to a UInt8? Or does 4 bytes - // fit some other optimization that's not as obvious? - - case invalid = 0 // msg isn't a recognized type + case unknown = 0 // msg isn't a recognized type case sync = 1 // msg is generated sync data to merge into an Automerge document case id = 2 // msg is a unique for the source/master of a document to know if they've been cloned + case peer = 3 + case join = 4 + case request = 5 + case unavailable = 6 + case ephemeral = 7 + case syncerror = 8 + case remoteHeadsChanged = 9 + case remoteSubscriptionChange = 10 + case leave = 11 } /// The definition of the Automerge network sync protocol. 
@@ -79,7 +85,7 @@ class P2PAutomergeSyncProtocol: NWProtocolFramerImplementation { minimumIncompleteLength: headerSize, maximumLength: headerSize ) { buffer, _ -> Int in - guard let buffer = buffer else { + guard let buffer else { return 0 } if buffer.count < headerSize { @@ -96,7 +102,7 @@ class P2PAutomergeSyncProtocol: NWProtocolFramerImplementation { } // Create an object to deliver the message. - var messageType = P2PSyncMessageType.invalid + var messageType = P2PSyncMessageType.unknown if let parsedMessageType = P2PSyncMessageType(rawValue: header.type) { messageType = parsedMessageType } @@ -122,9 +128,9 @@ extension NWProtocolFramer.Message { /// The type of sync message. var syncMessageType: P2PSyncMessageType { if let type = self["SyncMessageType"] as? P2PSyncMessageType { - return type + type } else { - return .invalid + .unknown } } } diff --git a/Packages/automerge-repo/Sources/AutomergeRepo/Networking/PeerNetworking/TXTRecordKeys.swift b/Packages/automerge-repo/Sources/AutomergeRepo/Networking/PeerNetworking/TXTRecordKeys.swift new file mode 100644 index 00000000..5f509700 --- /dev/null +++ b/Packages/automerge-repo/Sources/AutomergeRepo/Networking/PeerNetworking/TXTRecordKeys.swift @@ -0,0 +1,9 @@ +/// A type that provides type-safe strings for TXTRecord publications with Bonjour +public enum TXTRecordKeys: Sendable { + /// The document identifier. + public static let doc_id = "doc_id" + /// The peer identifier. + public static let peer_id = "peer_id" + /// The human-readable name for the peer. 
+ public static let name = "name" +} diff --git a/Packages/automerge-repo/Sources/AutomergeRepo/Networking/Providers/WebSocketProvider.swift b/Packages/automerge-repo/Sources/AutomergeRepo/Networking/Providers/WebSocketProvider.swift new file mode 100644 index 00000000..384e59f0 --- /dev/null +++ b/Packages/automerge-repo/Sources/AutomergeRepo/Networking/Providers/WebSocketProvider.swift @@ -0,0 +1,274 @@ +import OSLog + +public actor WebSocketProvider: NetworkProvider { + public typealias ProviderConfiguration = WebSocketProviderConfiguration + public struct WebSocketProviderConfiguration: Sendable { + let reconnectOnError: Bool + + public static let `default` = WebSocketProviderConfiguration(reconnectOnError: true) + } + + public var peeredConnections: [PeerConnection] + var delegate: (any NetworkEventReceiver)? + var peerId: PEER_ID? + var peerMetadata: PeerMetadata? + var webSocketTask: URLSessionWebSocketTask? + var backgroundWebSocketReceiveTask: Task? + var config: WebSocketProviderConfiguration + var endpoint: URL? + + public init(_ config: WebSocketProviderConfiguration = .default) { + self.config = config + self.peeredConnections = [] + self.delegate = nil + self.peerId = nil + self.peerMetadata = nil + self.webSocketTask = nil + self.backgroundWebSocketReceiveTask = nil + } + + // MARK: NetworkProvider Methods + + public func connect(to url: URL) async throws { + // TODO: refactor the connection logic to separate connecting and handling the peer/join + // messaging, from setting up the ongoing looping to allow for multiple retry attempts + // that return a concrete value of "good/no-good" separate from a protocol failure. + // ... something like + // func attemptConnect(to url: URL) async throws -> URLSessionWebSocketTask? 
+ guard let peerId = self.peerId, + let delegate = self.delegate + else { + fatalError("Attempting to connect before connected to a delegate") + } + + // establish the WebSocket connection + self.endpoint = url + let request = URLRequest(url: url) + webSocketTask = URLSession.shared.webSocketTask(with: request) + guard let webSocketTask else { + #if DEBUG + fatalError("Attempting to configure and join a nil webSocketTask") + #else + return + #endif + } + + Logger.webSocket.trace("Activating websocket to \(url, privacy: .public)") + // start the websocket processing things + webSocketTask.resume() + + // since we initiated the WebSocket, it's on us to send an initial 'join' + // protocol message to start the handshake phase of the protocol + let joinMessage = SyncV1Msg.JoinMsg(senderId: peerId, metadata: self.peerMetadata) + let data = try SyncV1Msg.encode(joinMessage) + try await webSocketTask.send(.data(data)) + Logger.webSocket.trace("SEND: \(joinMessage.debugDescription)") + + do { + // Race a timeout against receiving a Peer message from the other side + // of the WebSocket connection. If we fail that race, shut down the connection + // and move into a .closed connectionState + let websocketMsg = try await self.nextMessage(withTimeout: .seconds(3.5)) + + // Now that we have the WebSocket message, figure out if we got what we expected. + // For the sync protocol handshake phase, it's essentially "peer or die" since + // we were the initiating side of the connection. 
+ guard case let .peer(peerMsg) = try attemptToDecode(websocketMsg, peerOnly: true) else { + throw SyncV1Msg.Errors.UnexpectedMsg(msg: websocketMsg) + } + let newPeerConnection = PeerConnection(peerId: peerMsg.senderId, peerMetadata: peerMsg.peerMetadata) + self.peeredConnections = [newPeerConnection] + await delegate.receiveEvent(event: .ready(payload: newPeerConnection)) + Logger.webSocket.trace("Peered to targetId: \(peerMsg.senderId) \(peerMsg.debugDescription)") + } catch { + // if there's an error, disconnect anything that's lingering and cancel it down. + await self.disconnect() + throw error + } + + // If we have an existing task there, looping over messages, it means there was + // one previously set up, and there was a connection failure - at which point + // a reconnect was created to re-establish the webSocketTask. + if self.backgroundWebSocketReceiveTask == nil { + // infinitely loop and receive messages, but "out of band" + backgroundWebSocketReceiveTask = Task.detached { + try await self.ongoingRecieveWebSocketMessage() + } + } + } + + public func disconnect() async { + self.webSocketTask?.cancel(with: .normalClosure, reason: nil) + self.webSocketTask = nil + self.backgroundWebSocketReceiveTask?.cancel() + self.backgroundWebSocketReceiveTask = nil + self.endpoint = nil + + if let connectedPeer = self.peeredConnections.first { + self.peeredConnections.removeAll() + await delegate?.receiveEvent(event: .peerDisconnect(payload: .init(peerId: connectedPeer.peerId))) + } + + await delegate?.receiveEvent(event: .close) + } + + public func send(message: SyncV1Msg, to _: PEER_ID?) 
async { + guard let webSocketTask = self.webSocketTask else { + Logger.webSocket.warning("Attempt to send a message without a connection") + return + } + + do { + let data = try SyncV1Msg.encode(message) + try await webSocketTask.send(.data(data)) + } catch { + Logger.webSocket.error("Unable to encode and send message: \(error.localizedDescription, privacy: .public)") + } + } + + public func setDelegate( + _ delegate: any NetworkEventReceiver, + as peer: PEER_ID, + with metadata: PeerMetadata? + ) async { + self.delegate = delegate + self.peerId = peer + self.peerMetadata = metadata + } + + // MARK: utility methods + + private func attemptToDecode(_ msg: URLSessionWebSocketTask.Message, peerOnly: Bool = false) throws -> SyncV1Msg { + // Now that we have the WebSocket message, figure out if we got what we expected. + // For the sync protocol handshake phase, it's essentially "peer or die" since + // we were the initiating side of the connection. + switch msg { + case let .data(raw_data): + if peerOnly { + let msg = SyncV1Msg.decodePeer(raw_data) + if case .peer = msg { + return msg + } else { + // In the handshake phase and received anything other than a valid peer message + let decodeAttempted = SyncV1Msg.decode(raw_data) + Logger.webSocket + .warning( + "Decoding websocket message, expecting peer only - and it wasn't a peer message. 
RECEIVED MSG: \(decodeAttempted.debugDescription)" + ) + throw SyncV1Msg.Errors.UnexpectedMsg(msg: decodeAttempted) + } + } else { + let decodedMsg = SyncV1Msg.decode(raw_data) + if case .unknown = decodedMsg { + throw SyncV1Msg.Errors.UnexpectedMsg(msg: decodedMsg) + } + return decodedMsg + } + + case let .string(string): + // In the handshake phase and received anything other than a valid peer message + Logger.webSocket + .warning("Unknown websocket message received: .string(\(string))") + throw SyncV1Msg.Errors.UnexpectedMsg(msg: msg) + @unknown default: + // In the handshake phase and received anything other than a valid peer message + Logger.webSocket + .error("Unknown websocket message received: \(String(describing: msg))") + throw SyncV1Msg.Errors.UnexpectedMsg(msg: msg) + } + } + + // throw error on timeout + // throw error on cancel + // otherwise return the msg + private func nextMessage( + withTimeout: ContinuousClock.Instant + .Duration = .seconds(3.5) + ) async throws -> URLSessionWebSocketTask.Message { + // Co-operatively check to see if we're cancelled, and if so - we can bail out before + // going into the receive loop. + try Task.checkCancellation() + + // check the invariants + guard let webSocketTask = self.webSocketTask + else { + throw SyncV1Msg.Errors + .ConnectionClosed(errorDescription: "Attempting to wait for a websocket message when the task is nil") + } + + // Race a timeout against receiving a Peer message from the other side + // of the WebSocket connection. 
If we fail that race, shut down the connection + // and move into a .closed connectionState + let websocketMsg = try await withThrowingTaskGroup(of: URLSessionWebSocketTask.Message.self) { group in + group.addTask { + // retrieve the next websocket message + try await webSocketTask.receive() + } + + group.addTask { + // Race against the receive call with a continuous timer + try await Task.sleep(for: withTimeout) + throw SyncV1Msg.Errors.Timeout() + } + + guard let msg = try await group.next() else { + throw CancellationError() + } + // cancel all ongoing tasks (the websocket receive request, in this case) + group.cancelAll() + return msg + } + return websocketMsg + } + + /// Infinitely loops over incoming messages from the websocket and updates the state machine based on the messages + /// received. + private func ongoingRecieveWebSocketMessage() async throws { + var msgFromWebSocket: URLSessionWebSocketTask.Message? + while true { + guard let webSocketTask = self.webSocketTask else { + Logger.webSocket.warning("Receive Handler: webSocketTask is nil, terminating handler loop") + break + } + + try Task.checkCancellation() + + do { + msgFromWebSocket = try await webSocketTask.receive() + } catch { + if self.config.reconnectOnError, let endpoint = self.endpoint { + // TODO: add in some jitter/backoff logic, and potentially refactor to attempt to retry multiple times + try await self.connect(to: endpoint) + } else { + throw error + } + } + + do { + if let encodedMessage = msgFromWebSocket { + let msg = try attemptToDecode(encodedMessage) + await self.handleMessage(msg: msg) + } + } catch { + // catch decode failures, but don't terminate the whole shebang + // on a failure + Logger.webSocket + .warning("Unable to decode websocket message: \(error.localizedDescription, privacy: .public)") + } + } + } + + func handleMessage(msg: SyncV1Msg) async { + switch msg { + case let .leave(msg): + Logger.webSocket.trace("\(msg.senderId) requests to kill the connection") + await 
self.disconnect() + case let .join(msg): + Logger.webSocket.error("Unexpected message received: \(msg.debugDescription)") + case let .peer(msg): + Logger.webSocket.error("Unexpected message received: \(msg.debugDescription)") + default: + await self.delegate?.receiveEvent(event: .message(payload: msg)) + } + } +} diff --git a/MeetingNotes/Sync/WebSocketNetworking/WebsocketSyncConnection.swift b/Packages/automerge-repo/Sources/AutomergeRepo/Networking/WebSocketNetworking/WebsocketSyncConnection.swift similarity index 86% rename from MeetingNotes/Sync/WebSocketNetworking/WebsocketSyncConnection.swift rename to Packages/automerge-repo/Sources/AutomergeRepo/Networking/WebSocketNetworking/WebsocketSyncConnection.swift index cd162f9b..b8fded2b 100644 --- a/MeetingNotes/Sync/WebSocketNetworking/WebsocketSyncConnection.swift +++ b/Packages/automerge-repo/Sources/AutomergeRepo/Networking/WebSocketNetworking/WebsocketSyncConnection.swift @@ -4,53 +4,9 @@ import Foundation import OSLog import PotentCBOR -private struct TimeoutError: LocalizedError { - var errorDescription: String? = "Task timed out before completion" -} - -private struct SyncComplete: LocalizedError { - var errorDescription: String? = "The synchronization process is complete" -} - -private struct WebsocketClosed: LocalizedError { - var errorDescription: String? = "The websocket task was closed and/or nil" -} - -private struct InvalidURL: LocalizedError { - let urlString: String - var errorDescription: String? { - "Invalid URL: \(urlString)" - } -} - -private struct UnexpectedWebSocketMsg: LocalizedError { - let msg: MSG - var errorDescription: String? { - "Received an unexpected websocket message: \(msg)" - } -} - -private struct DocumentUnavailable: LocalizedError { - var errorDescription: String? = "The requested document isn't available" -} - /// A class that provides a WebSocket connection to sync an Automerge document. 
-public final class WebsocketSyncConnection: ObservableObject { - /// The state of the WebSocket sync connection. - public enum SyncProtocolState: String { - /// A sync connection hasn't yet been requested - case new - - /// The state is initiating and waiting to successfully peer with the recipient. - case handshake - - /// The connection has successfully peered. - case peered - - /// The connection has terminated. - case closed - } - +@MainActor +public final class WebsocketSyncConnection: ObservableObject, Identifiable { private var webSocketTask: URLSessionWebSocketTask? /// This connections "peer identifier" private let senderId: String @@ -68,12 +24,12 @@ public final class WebsocketSyncConnection: ObservableObject { /// A handle to a cancellable Combine pipeline that watches a document for updates and attempts to start a sync when /// it changes. - private var syncTrigger: Cancellable? + private var syncTrigger: (any Cancellable)? // TODO: Add a delegate link of some form for a 'ephemeral' msg data handler // TODO: Add an indicator of if we should involve ourselves in "gossip" about updates - @Published public var connectionState: SyncProtocolState + @Published public var protocolState: ProtocolState @Published public var syncInProgress: Bool // MARK: Initializers, registration/setup @@ -81,7 +37,7 @@ public final class WebsocketSyncConnection: ObservableObject { // having register after initialization lets us add within a SwiftUI view, and then // configure and activate things onAppear within the view... public init(_ document: Automerge.Document?, id documentId: DocumentId?) 
{ - connectionState = .new + protocolState = .setup syncState = SyncState() senderId = UUID().uuidString self.document = document @@ -124,7 +80,7 @@ public final class WebsocketSyncConnection: ObservableObject { try Task.checkCancellation() - guard websocketconnection.connectionState == .peered else { return nil } + guard websocketconnection.protocolState == .ready else { return nil } // enable the request... websocketconnection.receiveHandler = nil @@ -136,7 +92,7 @@ public final class WebsocketSyncConnection: ObservableObject { try Task.checkCancellation() Logger.webSocket .trace( - "sync in progress, !cancelled - state is: \(websocketconnection.connectionState.rawValue, privacy: .public)" + "sync in progress, !cancelled - state is: \(websocketconnection.protocolState.rawValue, privacy: .public)" ) // Race a timeout against receiving a Peer message from the other side // of the WebSocket connection. If we fail that race, shut down the connection @@ -175,7 +131,8 @@ public final class WebsocketSyncConnection: ObservableObject { // check the invariants guard let webSocketTask = self.webSocketTask else { - throw WebsocketClosed(errorDescription: "Attempting to wait for a websocket message when the task is nil") + throw SyncV1Msg.Errors + .ConnectionClosed(errorDescription: "Attempting to wait for a websocket message when the task is nil") } // Race a timeout against receiving a Peer message from the other side @@ -190,7 +147,7 @@ public final class WebsocketSyncConnection: ObservableObject { group.addTask { // Race against the receive call with a continuous timer try await Task.sleep(for: withTimeout) - throw TimeoutError() + throw SyncV1Msg.Errors.Timeout() } guard let msg = try await group.next() else { @@ -203,29 +160,29 @@ public final class WebsocketSyncConnection: ObservableObject { return websocketMsg } - private func attemptToDecode(_ msg: URLSessionWebSocketTask.Message, peerOnly: Bool = false) throws -> V1 { + private func attemptToDecode(_ msg: 
URLSessionWebSocketTask.Message, peerOnly: Bool = false) throws -> SyncV1Msg { // Now that we have the WebSocket message, figure out if we got what we expected. // For the sync protocol handshake phase, it's essentially "peer or die" since // we were the initiating side of the connection. switch msg { case let .data(raw_data): if peerOnly { - let msg = V1.decodePeer(raw_data) + let msg = SyncV1Msg.decodePeer(raw_data) if case .peer = msg { return msg } else { // In the handshake phase and received anything other than a valid peer message - let decodeAttempted = V1.decode(raw_data) + let decodeAttempted = SyncV1Msg.decode(raw_data) Logger.webSocket .warning( "Decoding websocket message, expecting peer only - and it wasn't a peer message. RECEIVED MSG: \(decodeAttempted.debugDescription)" ) - throw UnexpectedWebSocketMsg(msg: decodeAttempted) + throw SyncV1Msg.Errors.UnexpectedMsg(msg: decodeAttempted) } } else { - let decodedMsg = V1.decode(raw_data) + let decodedMsg = SyncV1Msg.decode(raw_data) if case .unknown = decodedMsg { - throw UnexpectedWebSocketMsg(msg: decodedMsg) + throw SyncV1Msg.Errors.UnexpectedMsg(msg: decodedMsg) } return decodedMsg } @@ -234,12 +191,12 @@ public final class WebsocketSyncConnection: ObservableObject { // In the handshake phase and received anything other than a valid peer message Logger.webSocket .warning("Unknown websocket message received: .string(\(string))") - throw UnexpectedWebSocketMsg(msg: msg) + throw SyncV1Msg.Errors.UnexpectedMsg(msg: msg) @unknown default: // In the handshake phase and received anything other than a valid peer message Logger.webSocket .error("Unknown websocket message received: \(String(describing: msg))") - throw UnexpectedWebSocketMsg(msg: msg) + throw SyncV1Msg.Errors.UnexpectedMsg(msg: msg) } } @@ -249,7 +206,7 @@ public final class WebsocketSyncConnection: ObservableObject { /// /// throws an error if something is awry, otherwise returns Void, with the connection established public func connect(_ 
destination: String) async throws { - guard connectionState == .new || connectionState == .closed else { + guard protocolState == .setup || protocolState == .closed else { return } guard self.document != nil, self.documentId != nil else { @@ -262,7 +219,7 @@ public final class WebsocketSyncConnection: ObservableObject { } guard let url = URL(string: destination) else { Logger.webSocket.error("Destination provided is not a valid URL") - throw InvalidURL(urlString: destination) + throw SyncV1Msg.Errors.InvalidURL(urlString: destination) } // establishes the websocket @@ -272,7 +229,7 @@ public final class WebsocketSyncConnection: ObservableObject { syncState = SyncState() webSocketTask = URLSession.shared.webSocketTask(with: request) } - guard let webSocketTask = webSocketTask else { + guard let webSocketTask else { #if DEBUG fatalError("Attempting to configure and join a nil webSocketTask") #else @@ -286,12 +243,12 @@ public final class WebsocketSyncConnection: ObservableObject { // since we initiated the WebSocket, it's on us to send an initial 'join' // protocol message to start the handshake phase of the protocol - let joinMessage = V1.JoinMsg(senderId: senderId) - let data = try V1.encode(joinMessage) + let joinMessage = SyncV1Msg.JoinMsg(senderId: senderId) + let data = try SyncV1Msg.encode(joinMessage) try await webSocketTask.send(.data(data)) Logger.webSocket.trace("SEND: \(joinMessage.debugDescription)") await MainActor.run { - self.connectionState = .handshake + self.protocolState = .preparing } do { @@ -304,21 +261,21 @@ public final class WebsocketSyncConnection: ObservableObject { // For the sync protocol handshake phase, it's essentially "peer or die" since // we were the initiating side of the connection. 
guard case let .peer(peerMsg) = try attemptToDecode(websocketMsg, peerOnly: true) else { - throw UnexpectedWebSocketMsg(msg: websocketMsg) + throw SyncV1Msg.Errors.UnexpectedMsg(msg: websocketMsg) } Logger.webSocket.trace("Peered to targetId: \(peerMsg.senderId) \(peerMsg.debugDescription)") // TODO: handle the gossip setup - read and process the peer metadata await MainActor.run { self.targetId = peerMsg.senderId - self.connectionState = .peered + self.protocolState = .ready } } catch { // if there's an error, disconnect anything that's lingering and cancel it down. await self.disconnect() throw error } - assert(self.connectionState == .peered) + assert(self.protocolState == .ready) } /// Asynchronously disconnect the WebSocket and shut down active sessions. @@ -328,7 +285,7 @@ public final class WebsocketSyncConnection: ObservableObject { self.receiveHandler?.cancel() await MainActor.run { self.syncTrigger = nil - self.connectionState = .closed + self.protocolState = .closed self.webSocketTask = nil self.syncInProgress = false } @@ -341,7 +298,7 @@ public final class WebsocketSyncConnection: ObservableObject { // verify we're in the right state before invoking the recursive (async) handler setup // and start the process of synchronizing the document. - if self.connectionState == .peered { + if self.protocolState == .ready { // NOTE: this is technically a race between do we accept a message and do something // with it (possibly changing state), or do we initiate a sync ourselves. 
In practice // against Automerge-repo code, it doesn't proactively ask us to do anything, playing @@ -367,7 +324,7 @@ public final class WebsocketSyncConnection: ObservableObject { public func sendRequestForDocument() async throws { // verify we're already connected and peered - guard connectionState == .peered, + guard protocolState == .ready, let document = self.document, let documentId = self.documentId, let targetId = self.targetId, @@ -385,20 +342,20 @@ public final class WebsocketSyncConnection: ObservableObject { await MainActor.run { self.syncInProgress = true } - let requestMsg = V1.RequestMsg( + let requestMsg = SyncV1Msg.RequestMsg( documentId: documentId.description, senderId: self.senderId, targetId: targetId, sync_message: syncData ) - let data = try V1.encode(requestMsg) + let data = try SyncV1Msg.encode(requestMsg) try await webSocketTask.send(.data(data)) Logger.webSocket.trace("SEND: \(requestMsg.debugDescription)") } /// Start a synchronization process for the Automerge document private func initiateSync() async { - guard connectionState == .peered, + guard protocolState == .ready, syncInProgress == false else { return @@ -419,10 +376,10 @@ public final class WebsocketSyncConnection: ObservableObject { if let syncData = document.generateSyncMessage(state: self.syncState) { await MainActor.run { - self.connectionState = .peered + self.protocolState = .ready self.syncInProgress = true } - let syncMsg = V1.SyncMsg( + let syncMsg = SyncV1Msg.SyncMsg( documentId: documentId.description, senderId: self.senderId, targetId: targetId, @@ -430,13 +387,13 @@ public final class WebsocketSyncConnection: ObservableObject { ) var data: Data? 
= nil do { - data = try V1.encode(syncMsg) + data = try SyncV1Msg.encode(syncMsg) } catch { Logger.webSocket.warning("Error encoding data: \(error.localizedDescription, privacy: .public)") } do { - guard let data = data else { + guard let data else { return } try await webSocketTask.send(.data(data)) @@ -473,7 +430,7 @@ public final class WebsocketSyncConnection: ObservableObject { Logger.webSocket .trace( - "Receive Handler: Task not cancelled, awaiting next message, state is \(self.connectionState.rawValue, privacy: .public)" + "Receive Handler: Task not cancelled, awaiting next message, state is \(self.protocolState.rawValue, privacy: .public)" ) let webSocketMessage = try await webSocketTask.receive() @@ -492,15 +449,15 @@ public final class WebsocketSyncConnection: ObservableObject { /// - if it `connectionState` is in ``SyncProtocolState/handshake`` and receives anything other than a peer msg /// - if it is invoked while `connectionState` is reporting a ``SyncProtocolState/closed`` state /// it disconnects and shuts down the web-socket. 
- private func handleReceivedMessage(msg: V1) async { - switch connectionState { - case .new: + private func handleReceivedMessage(msg: SyncV1Msg) async { + switch protocolState { + case .setup: Logger.webSocket.warning("RCVD: \(msg.debugDescription, privacy: .public) while in NEW state") - case .handshake: + case .preparing: if case let .peer(peerMsg) = msg { await MainActor.run { self.targetId = peerMsg.targetId - self.connectionState = .peered + self.protocolState = .ready } // TODO: handle the gossip setup - read and process the peer metadata } else { @@ -511,7 +468,7 @@ public final class WebsocketSyncConnection: ObservableObject { ) await self.disconnect() } - case .peered: + case .ready: switch msg { case let .error(errorMsg): Logger.webSocket.warning("RCVD ERROR: \(errorMsg.debugDescription)") @@ -547,7 +504,7 @@ public final class WebsocketSyncConnection: ObservableObject { self.syncInProgress = true } } - let replyingSyncMsg = V1.SyncMsg( + let replyingSyncMsg = SyncV1Msg.SyncMsg( documentId: documentId.description, senderId: self.senderId, targetId: targetId, @@ -555,7 +512,7 @@ public final class WebsocketSyncConnection: ObservableObject { ) Logger.webSocket .trace(" - SYNC: Sending another sync msg after applying updates") - let replyData = try V1.encode(replyingSyncMsg) + let replyData = try SyncV1Msg.encode(replyingSyncMsg) try await webSocketTask.send(.data(replyData)) Logger.webSocket.trace("SEND: \(replyingSyncMsg.debugDescription)") } else { @@ -583,7 +540,7 @@ public final class WebsocketSyncConnection: ObservableObject { case let .unavailable(inside_msg): Logger.webSocket.trace("RCVD unexpected msg: \(inside_msg.debugDescription, privacy: .public)") - // Messages that are technically allowed, but not common in the "peered" state unless + // Messages that are technically allowed, but not common in the "ready" state unless // you're "serving up multiple documents" (this implementation links to a single Automerge // document. 
@@ -593,7 +550,10 @@ public final class WebsocketSyncConnection: ObservableObject { case let .remoteSubscriptionChange(inside_msg): Logger.webSocket.warning("RCVD unusual msg: \(inside_msg.debugDescription, privacy: .public)") - // Messages that are always unexpected while in the "peered" state + case let .leave(inside_msg): + Logger.webSocket.warning("RCVD unusual msg: \(inside_msg.debugDescription, privacy: .public)") + + // Messages that are always unexpected while in the "ready" state case let .peer(inside_msg): Logger.webSocket.error("RCVD unexpected msg: \(inside_msg.debugDescription, privacy: .public)") diff --git a/Packages/automerge-repo/Sources/AutomergeRepo/PeerMetadata.swift b/Packages/automerge-repo/Sources/AutomergeRepo/PeerMetadata.swift new file mode 100644 index 00000000..ef85702c --- /dev/null +++ b/Packages/automerge-repo/Sources/AutomergeRepo/PeerMetadata.swift @@ -0,0 +1,23 @@ +import Foundation + +// ; Metadata sent in either the join or peer message types +// peer_metadata = { +// ; The storage ID of this peer +// ? storageId: storage_id, +// ; Whether the sender expects to connect again with this storage ID +// isEphemeral: bool +// } + +public struct PeerMetadata: Sendable, Codable, CustomDebugStringConvertible { + public var storageId: STORAGE_ID? + public var isEphemeral: Bool + + public init(storageId: STORAGE_ID? = nil, isEphemeral: Bool) { + self.storageId = storageId + self.isEphemeral = isEphemeral + } + + public var debugDescription: String { + "[storageId: \(storageId ?? 
"nil"), ephemeral: \(isEphemeral)]" + } +} diff --git a/Packages/automerge-repo/Sources/AutomergeRepo/Repo+Errors.swift b/Packages/automerge-repo/Sources/AutomergeRepo/Repo+Errors.swift new file mode 100644 index 00000000..777ffb55 --- /dev/null +++ b/Packages/automerge-repo/Sources/AutomergeRepo/Repo+Errors.swift @@ -0,0 +1,31 @@ +import Foundation + +enum Errors: Sendable { + public struct Unavailable: Sendable, LocalizedError { + let id: DocumentId + public var errorDescription: String? { + "Unknown document Id: \(self.id)" + } + } + + public struct DocDeleted: Sendable, LocalizedError { + let id: DocumentId + public var errorDescription: String? { + "Document with Id: \(self.id) has been deleted." + } + } + + public struct DocUnavailable: Sendable, LocalizedError { + let id: DocumentId + public var errorDescription: String? { + "Document with Id: \(self.id) is unavailable." + } + } + + public struct BigBadaBoom: Sendable, LocalizedError { + let msg: String + public var errorDescription: String? { + "Something went quite wrong: \(self.msg)." + } + } +} diff --git a/Packages/automerge-repo/Sources/AutomergeRepo/Repo.swift b/Packages/automerge-repo/Sources/AutomergeRepo/Repo.swift new file mode 100644 index 00000000..2dbcf687 --- /dev/null +++ b/Packages/automerge-repo/Sources/AutomergeRepo/Repo.swift @@ -0,0 +1,604 @@ +import Automerge +import AutomergeUtilities +import Foundation +import OSLog + +/// A type that accepts ephemeral messages as they arrive from connected network peers. +public protocol EphemeralMessageDelegate: Sendable { + /// Receive and process an event from a Network Provider. + /// - Parameter event: The event to process. + func receiveEphemeralMessage(_ msg: SyncV1Msg.EphemeralMsg) async +} + +public actor Repo { + public let peerId: PEER_ID + public var localPeerMetadata: PeerMetadata + + private var handles: [DocumentId: InternalDocHandle] = [:] + private var storage: DocumentStorage? 
+ private var network: NetworkSubsystem + + // saveDebounceRate = 100 + var sharePolicy: any SharePolicy + + /** maps peer id to to persistence information (storageId, isEphemeral), access by collection synchronizer */ + /** @hidden */ + private var peerMetadataByPeerId: [PEER_ID: PeerMetadata] = [:] + + private let maxRetriesForFetch: Int = 300 + private let pendingRequestWaitDuration: Duration = .seconds(1) + private var pendingRequestReadAttempts: [DocumentId: Int] = [:] + +// #remoteHeadsSubscriptions = new RemoteHeadsSubscriptions() +// export class RemoteHeadsSubscriptions extends EventEmitter { +// // Storage IDs we have received remote heads from +// #knownHeads: Map> = new Map() + // ^^^ DUPLICATES DATA stored in DocHandle... + +// // Storage IDs we have subscribed to via Repo.subscribeToRemoteHeads +// #ourSubscriptions: Set = new Set() + +// // Storage IDs other peers have subscribed to by sending us a control message +// #theirSubscriptions: Map> = new Map() + +// // Peers we will always share remote heads with even if they are not subscribed +// #generousPeers: Set = new Set() + +// // Documents each peer has open, we need this information so we only send remote heads of documents that the +// /peer knows +// #subscribedDocsByPeer: Map> = new Map() + + private var remoteHeadsGossipingEnabled = false + + private var _ephemeralMessageDelegate: (any EphemeralMessageDelegate)? + + // REPO + // https://github.com/automerge/automerge-repo/blob/main/packages/automerge-repo/src/Repo.ts + // - looks like it's the rough equivalent to the overall synchronization coordinator + + // - owns synchronizer, network, and storage subsystems + // - it "just" manages the connections, adds, and removals - when documents "appear", they're + // added to the synchronizer, which is the thing that accepts sync messages and tries to keep documents + // up to date with any registered peers. 
It emits (at a debounced rate) events to let anyone watching + // a document know that changes have occurred. + // + // Looks like it also has the idea of a sharePolicy per document, and if provided, then a document + // will be shared with peers (or positively respond to requests for the document if it's requested) + + // Repo + // property: peers [PeerId] - all (currently) connected peers + // property: handles [DocHandle] - list of all the DocHandles + // - func clone(Document) -> Document + // - func export(DocumentId) -> uint8[] + // - func import(uint8[]) -> Document + // - func create() -> Document + // - func find(DocumentId) -> Document + // - func delete(DocumentId) + // - func storageId() -> StorageId (async) + // - func storageIdForPeer(peerId) -> StorageId + // - func subscribeToRemotes([StorageId]) + + public init( + sharePolicy: some SharePolicy + ) { + self.peerId = UUID().uuidString + self.handles = [:] + self.peerMetadataByPeerId = [:] + self.storage = nil + self.localPeerMetadata = PeerMetadata(storageId: nil, isEphemeral: true) + self.sharePolicy = sharePolicy + self.network = NetworkSubsystem() + } + + /// Add a persistent storage provider to the repo. + /// - Parameter provider: The storage provider to add. + public func addStorageProvider(_ provider: some StorageProvider) { + self.storage = DocumentStorage(provider) + self.localPeerMetadata = PeerMetadata(storageId: provider.id, isEphemeral: false) + } + + /// Add a configured network provider to the repo + /// - Parameter adapter: The network provider to add. + public func addNetworkAdapter(adapter: any NetworkProvider) async { + if await self.network.repo == nil { + await self.network.setRepo(self) + } + await network.addAdapter(adapter: adapter) + } + + /// Set the delegate that to receive ephemeral messages from Automerge-repo peers + /// - Parameter delegate: The object that Automerge-repo calls with ephemeral messages. 
+ public func setDelegate(_ delegate: some EphemeralMessageDelegate) { + self._ephemeralMessageDelegate = delegate + } + + /// Returns a list of repository documentIds. + /// + /// The list does not reflect deleted or unavailable documents that have been requested, but may return + /// Ids for documents still being creating, stored, or transferring from a peer. + public func documentIds() async -> [DocumentId] { + handles.values + .filter { handle in + handle.state == .ready || handle.state == .loading || handle.state == .requesting + } + .map(\.id) + } + + // MARK: Synchronization Pieces - Peers + + /// Returns a list of the ids of available peers. + public func peers() async -> [PEER_ID] { + peerMetadataByPeerId.keys.sorted() + } + + /// Returns the storage Id of for the id of the peer that you provide. + /// - Parameter peer: The peer to request + func getStorageIdOfPeer(peer: PEER_ID) async -> STORAGE_ID? { + if let metaForPeer = peerMetadataByPeerId[peer] { + metaForPeer.storageId + } else { + nil + } + } + + func beginSync(docId: DocumentId, to peer: PEER_ID) async { + do { + let handle = try await self.resolveDocHandle(id: docId) + let syncState = self.syncState(id: docId, peer: peer) + if let syncData = handle.doc.generateSyncMessage(state: syncState) { + let syncMsg: SyncV1Msg = .sync(.init( + documentId: docId.description, + senderId: self.peerId, + targetId: peer, + sync_message: syncData + )) + await network.send(message: syncMsg, to: peer) + } + } catch { + Logger.repo + .error("Failed to generate sync on peer connection: \(error.localizedDescription, privacy: .public)") + } + } + + func addPeerWithMetadata(peer: PEER_ID, metadata: PeerMetadata?) 
async { + peerMetadataByPeerId[peer] = metadata + for docId in await self.documentIds() { + if await sharePolicy.share(peer: peer, docId: docId) { + await beginSync(docId: docId, to: peer) + } + } + } + + func removePeer(peer: PEER_ID) { + peerMetadataByPeerId.removeValue(forKey: peer) + } + + // MARK: Handle pass-back of Ephemeral Messages + + func handleEphemeralMessage(_ msg: SyncV1Msg.EphemeralMsg) async { + await self._ephemeralMessageDelegate?.receiveEphemeralMessage(msg) + } + + // MARK: Synchronization Pieces - For Network Subsystem Access + + func handleSync(msg: SyncV1Msg.SyncMsg) async { + Logger.repo.trace("PEER: \(self.peerId) - handling a sync msg from \(msg.senderId) to \(msg.targetId)") + guard let docId = DocumentId(msg.documentId) else { + Logger.repo + .warning("Invalid documentId \(msg.documentId) received in a sync message \(msg.debugDescription)") + return + } + do { + if handles[docId] == nil { + // There is no in-memory handle for the document being synced, so this is a request + // to create a local copy of the document encapsulated in the sync message. + let newDocument = Document() + let newHandle = InternalDocHandle(id: docId, isNew: true, initialValue: newDocument) + + // must update the repo with the new handle and empty document _before_ + // using syncState, since it needs to resolve the documentId + handles[docId] = newHandle + _ = try await self.resolveDocHandle(id: docId) + } + guard let handle = handles[docId] else { fatalError("HANDLE DOESN'T EXIST") } + let docFromHandle = handle.doc ?? 
Document() + let syncState = self.syncState(id: docId, peer: msg.senderId) + // Apply the request message as a sync update + try docFromHandle.receiveSyncMessage(state: syncState, message: msg.data) + // Stash the updated document and sync state + await self.updateDoc(id: docId, doc: docFromHandle) + await self.updateSyncState(id: docId, peer: msg.senderId, syncState: syncState) + // Attempt to generate a sync message to reply + + // DEBUG ONLY + // print("\(self.peerId): STATE OF \(handle.id)") + // try docFromHandle.walk() + + if let syncData = docFromHandle.generateSyncMessage(state: syncState) { + let syncMsg: SyncV1Msg = .sync(.init( + documentId: docId.description, + senderId: self.peerId, + targetId: msg.senderId, + sync_message: syncData + )) + Logger.repo.trace("Sync received and applied, replying with a sync msg back to \(msg.senderId)") + await network.send(message: syncMsg, to: msg.senderId) + } + // else no sync is needed, as the last sync state reports that it knows about + // all the changes it needs - that it's up to date with the local document + } catch { + let err: SyncV1Msg = + .error(.init(message: "Error receiving sync: \(error.localizedDescription)")) + Logger.repo.warning("Error receiving initial sync for \(docId, privacy: .public)") + await network.send(message: err, to: msg.senderId) + } + } + + func handleRequest(msg: SyncV1Msg.RequestMsg) async { + guard let docId = DocumentId(msg.documentId) else { + Logger.repo + .warning("Invalid documentId \(msg.documentId) received in a sync message \(msg.debugDescription)") + return + } + if handles[docId] != nil { + // If we have the document, see if we're agreeable to sending a copy + if await sharePolicy.share(peer: msg.senderId, docId: docId) { + do { + let handle = try await self.resolveDocHandle(id: docId) + let syncState = self.syncState(id: docId, peer: msg.senderId) + // Apply the request message as a sync update + try handle.doc.receiveSyncMessage(state: syncState, message: msg.data) + // 
Stash the updated doc and sync state + await self.updateDoc(id: docId, doc: handle.doc) + await self.updateSyncState(id: docId, peer: msg.senderId, syncState: syncState) + // Attempt to generate a sync message to reply + if let syncData = handle.doc.generateSyncMessage(state: syncState) { + let syncMsg: SyncV1Msg = .sync(.init( + documentId: docId.description, + senderId: self.peerId, + targetId: msg.senderId, + sync_message: syncData + )) + await network.send(message: syncMsg, to: msg.senderId) + } // else no sync is needed, syncstate reports that they have everything they need + } catch { + let err: SyncV1Msg = + .error(.init(message: "Unable to resolve document: \(error.localizedDescription)")) + await network.send(message: err, to: msg.senderId) + } + } else { + let nope = SyncV1Msg.UnavailableMsg( + documentId: msg.documentId, + senderId: self.peerId, + targetId: msg.senderId + ) + await network.send(message: .unavailable(nope), to: msg.senderId) + } + + } else { + let nope = SyncV1Msg.UnavailableMsg( + documentId: msg.documentId, + senderId: self.peerId, + targetId: msg.senderId + ) + await network.send(message: .unavailable(nope), to: msg.senderId) + } + } + + // MARK: PUBLIC API + + /// Creates a new Automerge document, storing it and sharing the creation with connected peers. + /// - Returns: The Automerge document. + public func create() async throws -> DocHandle { + let handle = InternalDocHandle(id: DocumentId(), isNew: true, initialValue: Document()) + self.handles[handle.id] = handle + let resolved = try await resolveDocHandle(id: handle.id) + return resolved + } + + /// Creates a new Automerge document, storing it and sharing the creation with connected peers. + /// - Returns: The Automerge document. + /// - Parameter id: The Id of the Automerge document. 
+ public func create(id: DocumentId) async throws -> DocHandle { + let handle = InternalDocHandle(id: id, isNew: true, initialValue: Document()) + self.handles[handle.id] = handle + let resolved = try await resolveDocHandle(id: handle.id) + return resolved + } + + /// Creates a new Automerge document, storing it and sharing the creation with connected peers. + /// - Parameter doc: The Automerge document to use for the new, shared document + /// - Returns: The Automerge document. + public func create(doc: Document, id: DocumentId? = nil) async throws -> DocHandle { + let creationId = id ?? DocumentId() + let handle = InternalDocHandle(id: creationId, isNew: true, initialValue: doc) + self.handles[handle.id] = handle + let resolved = try await resolveDocHandle(id: handle.id) + return resolved + } + + /// Creates a new Automerge document, storing it and sharing the creation with connected peers. + /// - Parameter data: The data to load as an Automerge document for the new, shared document. + /// - Returns: The Automerge document. + public func create(data: Data, id: DocumentId? = nil) async throws -> DocHandle { + let creationId = id ?? DocumentId() + let handle = try InternalDocHandle(id: creationId, isNew: true, initialValue: Document(data)) + self.handles[handle.id] = handle + let resolved = try await resolveDocHandle(id: handle.id) + return resolved + } + + /// Clones a document the repo already knows to create a new, shared document. + /// - Parameter id: The id of the document to clone. + /// - Returns: The Automerge document. 
+ public func clone(id: DocumentId) async throws -> DocHandle { + let handle = try await resolveDocHandle(id: id) + let fork = handle.doc.fork() + let newId = DocumentId() + let newHandle = InternalDocHandle(id: newId, isNew: false, initialValue: fork) + handles[newHandle.id] = newHandle + let resolved = try await resolveDocHandle(id: newHandle.id) + return resolved + } + + public func find(id: DocumentId) async throws -> DocHandle { + // generally of the idea that we'll drive DocHandle state updates from within Repo + // and these async methods + let handle: InternalDocHandle + if let knownHandle = handles[id] { + handle = knownHandle + } else { + let newHandle = InternalDocHandle(id: id, isNew: false) + handles[id] = newHandle + handle = newHandle + } + return try await resolveDocHandle(id: handle.id) + } + + /// Deletes an automerge document from the repo. + /// - Parameter id: The id of the document to remove. + /// + /// > NOTE: deletes do not propagate to connected peers. + public func delete(id: DocumentId) async throws { + guard let originalDocHandle = handles[id] else { + throw Errors.Unavailable(id: id) + } + originalDocHandle.state = .deleted + originalDocHandle.doc = nil + // STRUCT ONLY handles[id] = originalDocHandle + + try await withThrowingTaskGroup(of: Void.self) { group in + group.addTask { + try await self.purgeFromStorage(id: id) + } + // specifically call/wait in case we get an error from + // the delete process in purging the document. + try await group.next() + } + } + + /// Export the data associated with an Automerge document from the repo. + /// - Parameter id: The id of the document to export. + /// - Returns: The latest, compacted data of the Automerge document. 
+ public func export(id: DocumentId) async throws -> Data { + let handle = try await self.resolveDocHandle(id: id) + return handle.doc.save() + } + + /// Imports data as a new Automerge document + /// - Parameter data: The data to import as an Automerge document + /// - Returns: The id of the document that was created on import. + public func `import`(data: Data) async throws -> DocHandle { + let handle = try InternalDocHandle(id: DocumentId(), isNew: true, initialValue: Document(data)) + self.handles[handle.id] = handle + return try await self.resolveDocHandle(id: handle.id) + } + + public func subscribeToRemotes(remotes _: [STORAGE_ID]) async {} + + /// The storage id of this repo, if any. + /// - Returns: The storage id from the repo's storage provider or nil. + public func storageId() async -> STORAGE_ID? { + if let storage { + return await storage.id + } + return nil + } + + // MARK: Methods to expose retrieving DocHandles to the subsystems + + func syncState(id: DocumentId, peer: PEER_ID) -> SyncState { + guard let handle = handles[id] else { + fatalError("No stored dochandle for id: \(id)") + } + if let handleSyncState = handle.syncStates[peer] { + Logger.repo.trace("Providing stored sync state for doc \(id)") + return handleSyncState + } else { + // TODO: add attempt to load from storage and return it before creating a new one + Logger.repo.trace("No stored sync state for doc \(id) and peer \(peer).") + Logger.repo.trace("Creating a new sync state for doc \(id)") + return SyncState() + } + } + + func updateSyncState(id: DocumentId, peer: PEER_ID, syncState: SyncState) async { + guard let handle = handles[id] else { + fatalError("No stored dochandle for id: \(id)") + } + Logger.repo.trace("Storing updated sync state for doc \(id) and peer \(peer).") + handle.syncStates[peer] = syncState + } + + func markDocUnavailable(id: DocumentId) async { + // handling a requested document being marked as unavailable after all peers have been checked + guard let handle = 
handles[id] else { + Logger.repo.error("missing handle for documentId \(id.description) while attempt to mark unavailable") + return + } + assert(handle.state == .requesting) + handle.state = .unavailable + handles[id] = handle + } + + func updateDoc(id: DocumentId, doc: Document) async { + // handling a requested document being marked as ready after document contents received + guard let handle = handles[id] else { + fatalError("No stored document handle for document id: \(id)") + } + if handle.state == .requesting { + handle.state = .ready + } + assert(handle.state == .ready) + handle.doc = doc + if let storage = self.storage { + do { + try await withThrowingTaskGroup(of: Void.self) { group in + group.addTask { + try await storage.saveDoc(id: id, doc: doc) + } + // specifically call/wait in case we get an error from + // the delete process in purging the document. + try await group.next() + } + } catch { + Logger.repo + .warning( + "Error received while attempting to store document ID \(id): \(error.localizedDescription)" + ) + } + } + } + + // MARK: Methods to resolve docHandles + + func merge(id: DocumentId, with: DocumentId) async throws { + guard let handle1 = handles[id] else { + throw Errors.DocUnavailable(id: id) + } + guard let handle2 = handles[with] else { + throw Errors.DocUnavailable(id: with) + } + + let doc1 = try await resolveDocHandle(id: handle1.id) + // Start with updating from storage changes, if any + if let doc1Storage = try await storage?.loadDoc(id: handle1.id) { + try doc1.doc.merge(other: doc1Storage) + } + + // merge in the provided second document from memory + let doc2 = try await resolveDocHandle(id: handle2.id) + try doc1.doc.merge(other: doc2.doc) + + // JUST IN CASE, try and load doc2 from storage and merge that if available + if let doc2Storage = try await storage?.loadDoc(id: handle2.id) { + try doc1.doc.merge(other: doc2Storage) + } + // finally, update the repo + await self.updateDoc(id: doc1.id, doc: doc1.doc) + } + + private 
func loadFromStorage(id: DocumentId) async throws -> Document? { + guard let storage = self.storage else { + return nil + } + return try await storage.loadDoc(id: id) + } + + private func purgeFromStorage(id: DocumentId) async throws { + guard let storage = self.storage else { + return + } + try await storage.purgeDoc(id: id) + } + + private func resolveDocHandle(id: DocumentId) async throws -> DocHandle { + if let handle: InternalDocHandle = handles[id] { + switch handle.state { + case .idle: + if handle.doc != nil { + // if there's an Automerge document in memory, jump to ready + handle.state = .ready + // STRUCT ONLY handles[id] = handle + } else { + // otherwise, first attempt to load it from persistent storage + // (if available) + handle.state = .loading + // STRUCT ONLY handles[id] = handle + } + return try await resolveDocHandle(id: id) + case .loading: + // Do we have the document + if let docFromHandle = handle.doc { + // We have the document - so being in loading means "try to save this to + // a storage provider, if one exists", then hand it back as good. + if let storage = self.storage { + await withThrowingTaskGroup(of: Void.self) { group in + group.addTask { + try await storage.saveDoc(id: id, doc: docFromHandle) + } + // DO NOT wait/see if there's an error in the repo attempting to + // store the document - this gives us a bit of "best effort" functionality + // TODO: consider making this a parameter, or review this choice before release + // specifically call/wait in case we get an error from + // the delete process in purging the document. + // try await group.next() + // + // if we want to change this, uncomment the `try await` above and + // convert the `withThrowingTaskGroup` to `try await` as well. 
+ } + } + // TODO: if we're allowed and prolific in gossip, notify any connected + // peers there's a new document before jumping to the 'ready' state + handle.state = .ready + // STRUCT ONLY handles[id] = handle + return DocHandle(id: id, doc: docFromHandle) + } else { + // We don't have the underlying Automerge document, so attempt + // to load it from storage, and failing that - if the storage provider + // doesn't exist, for example - jump forward to attempting to fetch + // it from a peer. + if let doc = try await loadFromStorage(id: id) { + handle.state = .ready + // STRUCT ONLY handles[id] = handle + return DocHandle(id: id, doc: doc) + } else { + handle.state = .requesting + // STRUCT ONLY handles[id] = handle + pendingRequestReadAttempts[id] = 0 + try await self.network.startRemoteFetch(id: handle.id) + return try await resolveDocHandle(id: id) + } + } + case .requesting: + guard let updatedHandle = handles[id] else { + throw Errors.DocUnavailable(id: handle.id) + } + if let doc = updatedHandle.doc, updatedHandle.state == .ready { + return DocHandle(id: id, doc: doc) + } else { + guard let previousRequests = pendingRequestReadAttempts[id] else { + throw Errors.DocUnavailable(id: id) + } + if previousRequests < maxRetriesForFetch { + // we are racing against the receipt of a network result + // to see what we get at the end + try await Task.sleep(for: pendingRequestWaitDuration) + return try await resolveDocHandle(id: id) + } else { + throw Errors.DocUnavailable(id: id) + } + } + case .ready: + guard let doc = handle.doc else { fatalError("DocHandle state is ready, but ._doc is null") } + return DocHandle(id: id, doc: doc) + case .unavailable: + throw Errors.DocUnavailable(id: handle.id) + case .deleted: + throw Errors.DocDeleted(id: handle.id) + } + } else { + throw Errors.DocUnavailable(id: id) + } + } +} diff --git a/Packages/automerge-repo/Sources/AutomergeRepo/RepoTypes.swift b/Packages/automerge-repo/Sources/AutomergeRepo/RepoTypes.swift new file mode 
100644 index 00000000..6bb0e39c --- /dev/null +++ b/Packages/automerge-repo/Sources/AutomergeRepo/RepoTypes.swift @@ -0,0 +1,24 @@ +import struct Foundation.Data +import struct Foundation.UUID + +/// A type that represents a peer +/// +/// Typically a UUID4 in string form. +public typealias PEER_ID = String + +/// A type that represents an identity for the storage of a peer. +/// +/// Typically a UUID4 in string form. Receiving peers may tie cached sync state for documents to this identifier. +public typealias STORAGE_ID = String + +/// The external representation of a document Id. +/// +/// Typically a string that is 16 bytes of data encoded in bs58 format. +public typealias MSG_DOCUMENT_ID = String +// internally, DOCUMENT_ID is represented by the internal type DocumentId + +/// A type that represents the raw bytes of an Automerge sync message. +public typealias SYNC_MESSAGE = Data + +/// A type that represents the raw bytes of a set of encoded changes to an Automerge document. +public typealias CHUNK = Data diff --git a/Packages/automerge-repo/Sources/AutomergeRepo/SharePolicy.swift b/Packages/automerge-repo/Sources/AutomergeRepo/SharePolicy.swift new file mode 100644 index 00000000..ffff6df9 --- /dev/null +++ b/Packages/automerge-repo/Sources/AutomergeRepo/SharePolicy.swift @@ -0,0 +1,29 @@ +/// A type that determines if a document may be shared with a peer +public protocol SharePolicy: Sendable { + /// Returns a Boolean value that indicates whether a document may be shared. + /// - Parameters: + /// - peer: The peer to potentially share with + /// - docId: The document Id to share + func share(peer: PEER_ID, docId: DocumentId) async -> Bool +} + +#warning("REWORK THIS SETUP") +// it's annoying as hell to have to specify the SharePolicies.agreeable kind of setup just to get +// this. Seems better to make SharePolicy a struct, rename the protocol to allow for +// generics/existential use, and add some static let variants onto the type itself. 
+public enum SharePolicies: Sendable { + public static let agreeable = AlwaysPolicy() + public static let readonly = NeverPolicy() + + public struct AlwaysPolicy: SharePolicy { + public func share(peer _: PEER_ID, docId _: DocumentId) async -> Bool { + true + } + } + + public struct NeverPolicy: SharePolicy { + public func share(peer _: PEER_ID, docId _: DocumentId) async -> Bool { + false + } + } +} diff --git a/Packages/automerge-repo/Sources/AutomergeRepo/Storage/DocumentStorage.swift b/Packages/automerge-repo/Sources/AutomergeRepo/Storage/DocumentStorage.swift new file mode 100644 index 00000000..34b84eca --- /dev/null +++ b/Packages/automerge-repo/Sources/AutomergeRepo/Storage/DocumentStorage.swift @@ -0,0 +1,235 @@ +import Automerge +import Foundation +import OSLog + +// inspired from automerge-repo: +// https://github.com/automerge/automerge-repo/blob/main/packages/automerge-repo/src/storage/StorageSubsystem.ts + +/// A type that provides coordinated, concurrency safe access to persist Automerge documents. +public actor DocumentStorage { + let chunkNamespace = "incrChanges" + var compacting: Bool + let _storage: any StorageProvider + var latestHeads: [DocumentId: Set] + + var storedChunkSize: [DocumentId: Int] + var memoryChunkSize: [DocumentId: Int] + var storedDocSize: [DocumentId: Int] + + var chunks: [DocumentId: [Data]] + + /// Creates a new concurrency safe document storage instance to manage changes to Automerge documents. + /// - Parameter storage: The storage provider + public init(_ storage: some StorageProvider) { + compacting = false + _storage = storage + latestHeads = [:] + chunks = [:] + + // memo-ized sizes of documents and chunks so that we don't always have to + // iterate through the storage provider (disk accesses, or even network accesses) + // to get a size determination to know if we should compact or not. 
+ // (used in function`shouldCompact(:DocumentId)`) + storedChunkSize = [:] + memoryChunkSize = [:] + storedDocSize = [:] + } + + public var id: STORAGE_ID { + _storage.id + } + + /// Removes a document from persistent storage. + /// - Parameter id: The id of the document to remove. + public func purgeDoc(id: DocumentId) async throws { + try await _storage.remove(id: id) + } + + /// Returns an existing, or creates a new, document for the document Id you provide. + /// + /// The method throws errors from the underlying storage system or Document errors if the + /// loaded data was corrupt or incorrect. + /// + /// - Parameter id: The document Id + /// - Returns: An automerge document. + public func loadDoc(id: DocumentId) async throws -> Document { + var combined: Data + let storageChunks = try await _storage.loadRange(id: id, prefix: chunkNamespace) + if chunks[id] == nil { + chunks[id] = [] + } + let inMemChunks: [Data] = chunks[id] ?? [] + + if let baseData = try await _storage.load(id: id) { + // loading all the changes from the base document and any incremental saves available + combined = baseData + storedDocSize[id] = baseData.count + } else { + // loading only incremental saves available, the base document doesn't exist in storage + combined = Data() + storedDocSize[id] = 0 + } + + var inMemSize = memoryChunkSize[id] ?? 0 + for chunk in inMemChunks { + inMemSize += chunk.count + combined.append(chunk) + } + memoryChunkSize[id] = inMemSize + + var storedChunks = storedChunkSize[id] ?? 0 + for chunk in storageChunks { + storedChunks += chunk.count + combined.append(chunk) + } + storedChunkSize[id] = storedChunks + let combinedDoc = try Document(combined) + latestHeads[id] = combinedDoc.heads() + + return combinedDoc + } + + /// Determine if a documentId should be compacted. + /// - Parameter key: the document Id to analyze + /// - Returns: a Boolean value that indicates whether the document should be compacted. 
+ func shouldCompact(_ key: DocumentId) async throws -> Bool { + if compacting { + return false + } + let inMemSize = memoryChunkSize[key] ?? (chunks[key] ?? []).reduce(0) { incrSize, data in + incrSize + data.count + } + + let baseSize = if let i = storedDocSize[key] { + i + } else { + try await _storage.load(id: key)?.count ?? 0 + } + + let chunkSize = if let j = storedChunkSize[key] { + j + } else { + try await _storage.loadRange(id: key, prefix: chunkNamespace).reduce(0) { incrSize, data in + incrSize + data.count + } + } + return chunkSize > baseSize || inMemSize > baseSize + } + + /// Determine if the document provided has changes not represented by the underlying storage system + /// - Parameters: + /// - key: The Id of the document + /// - doc: The Automerge document + /// - Returns: A Boolean value that indicates the document has changes. + func shouldSave(for key: DocumentId, doc: Document) -> Bool { + guard let storedHeads = self.latestHeads[key] else { + return true + } + let newHeads = doc.heads() + if newHeads == storedHeads { + return false + } + return true + } + + /// Saves a document to the storage backend, compacting it if needed. + /// - Parameters: + /// - id: The Id of the document + /// - doc: The automerge document + public func saveDoc(id: DocumentId, doc: Document) async throws { + if shouldSave(for: id, doc: doc) { + if try await shouldCompact(id) { + try await compact(id: id, doc: doc) + self.chunks[id] = [] + } else { + try await self.saveIncremental(id: id, doc: doc) + } + } + } + + /// A concurrency safe compaction routine to consolidate in-memory and stored incremental changes into a compacted + /// Automerge document. + /// - Parameters: + /// - id: The document Id to compact + /// - doc: The document to compact. 
+ public func compact(id: DocumentId, doc: Document) async throws { + compacting = true + let providedData = doc.save() + var combined: Data = if let baseData = try await _storage.load(id: id) { + // loading all the changes from the base document and any incremental saves available + baseData + } else { + // loading only incremental saves available, the base document doesn't exist in storage + Data() + } + + combined.append(providedData) + + let inMemChunks: [Data] = chunks[id] ?? [] + var foundChunkHashValues: [Int] = [] + for chunk in inMemChunks { + foundChunkHashValues.append(chunk.hashValue) + combined.append(chunk) + } + + let storageChunks = try await _storage.loadRange(id: id, prefix: chunkNamespace) + for chunk in storageChunks { + combined.append(chunk) + } + + let compactedDoc = try Document(combined) + + let compactedData = compactedDoc.save() + // only remove the chunks AFTER the save is complete + try await _storage.save(id: id, data: compactedData) + storedDocSize[id] = compactedData.count + latestHeads[id] = compactedDoc.heads() + + // refresh the inMemChunks in case it's changed (possible with re-entrancy, due to + // the possible suspension points at each of the above `await` statements since we + // grabbed the in-memory reference and made a copy) + var updatedMemChunks = chunks[id] ?? 
[] + for d in inMemChunks { + if let indexToRemove = updatedMemChunks.firstIndex(of: d) { + updatedMemChunks.remove(at: indexToRemove) + } + } + chunks[id] = updatedMemChunks + memoryChunkSize[id] = updatedMemChunks.reduce(0) { incrSize, data in + incrSize + data.count + } + + // now iterate through and remove the stored chunks we loaded earlier + // Doing this last, intentionally - it's another suspension point, and IF someone + // reads the base document and appends the found changes in a load, they'll still + // end up with the same document, so these can safely be removed _after_ the new + // compacted document has been stored away by the underlying storage provider. + try await _storage.removeRange(id: id, prefix: chunkNamespace, data: storageChunks) + storedChunkSize[id] = try await _storage.loadRange(id: id, prefix: chunkNamespace) + .reduce(0) { incrSize, data in + incrSize + data.count + } + + compacting = false + } + + /// Save incremental changes of the existing Automerge document. + /// - Parameters: + /// - id: The Id of the document + /// - doc: The automerge document + public func saveIncremental(id: DocumentId, doc: Document) async throws { + var chunkCollection = chunks[id] ?? [] + let oldHeads = latestHeads[id] ?? 
Set() + let incrementalChanges = try doc.encodeChangesSince(heads: oldHeads) + chunkCollection.append(incrementalChanges) + chunks[id] = chunkCollection + try await _storage.addToRange(id: id, prefix: chunkNamespace, data: incrementalChanges) + latestHeads[id] = doc.heads() + } + +// public func loadSyncState(id _: DocumentId, storageId _: SyncV1.STORAGE_ID) async -> SyncState { +// SyncState() +// } +// +// public func saveSyncState(id _: DocumentId, storageId _: SyncV1.STORAGE_ID, state _: SyncState) async {} +} diff --git a/Packages/automerge-repo/Sources/AutomergeRepo/Storage/StorageProvider.swift b/Packages/automerge-repo/Sources/AutomergeRepo/Storage/StorageProvider.swift new file mode 100644 index 00000000..f6b448d8 --- /dev/null +++ b/Packages/automerge-repo/Sources/AutomergeRepo/Storage/StorageProvider.swift @@ -0,0 +1,18 @@ +import struct Foundation.Data + +// loose adaptation from automerge-repo storage interface +// https://github.com/automerge/automerge-repo/blob/main/packages/automerge-repo/src/storage/StorageAdapter.ts +/// A type that provides an interface for persisting the changes of Automerge documents by Id +public protocol StorageProvider: Sendable { + var id: STORAGE_ID { get } + + func load(id: DocumentId) async throws -> Data? 
+ func save(id: DocumentId, data: Data) async throws + func remove(id: DocumentId) async throws + + // MARK: Incremental Load Support + + func addToRange(id: DocumentId, prefix: String, data: Data) async throws + func loadRange(id: DocumentId, prefix: String) async throws -> [Data] + func removeRange(id: DocumentId, prefix: String, data: [Data]) async throws +} diff --git a/Packages/automerge-repo/Sources/AutomergeRepo/Sync/CBORCoder.swift b/Packages/automerge-repo/Sources/AutomergeRepo/Sync/CBORCoder.swift new file mode 100644 index 00000000..9160ac73 --- /dev/null +++ b/Packages/automerge-repo/Sources/AutomergeRepo/Sync/CBORCoder.swift @@ -0,0 +1,8 @@ +import Foundation +import PotentCBOR + +/// A type that provides concurrency-safe access to the CBOR encoder and decoder. +public actor CBORCoder { + public static let encoder = CBOREncoder() + public static let decoder = CBORDecoder() +} diff --git a/MeetingNotes/Sync/DocumentSyncCoordinator.swift b/Packages/automerge-repo/Sources/AutomergeRepo/Sync/DocumentSyncCoordinator.swift similarity index 85% rename from MeetingNotes/Sync/DocumentSyncCoordinator.swift rename to Packages/automerge-repo/Sources/AutomergeRepo/Sync/DocumentSyncCoordinator.swift index e82846dd..42344e19 100644 --- a/MeetingNotes/Sync/DocumentSyncCoordinator.swift +++ b/Packages/automerge-repo/Sources/AutomergeRepo/Sync/DocumentSyncCoordinator.swift @@ -7,59 +7,47 @@ import OSLog import UIKit // for UIDevice.name access #endif -/// A type that provides type-safe strings for TXTRecord publications with Bonjour -enum TXTRecordKeys { - /// The document identifier. - static var doc_id = "doc_id" - /// The peer identifier. - static var peer_id = "peer_id" - /// The human-readable name for the peer. - static var name = "name" -} - /// A collection of User Default keys for the app. -enum MeetingNotesDefaultKeys { - /// The key to the string that the app broadcasts to represent you when sharing and syncing MeetingNotes. 
- static let sharingIdentity = "sharingIdentity" +public enum SynchronizerDefaultKeys: Sendable { + /// The key to the string that the app broadcasts to represent you when sharing and syncing Automerge Documents. + public static let publicPeerName = "sharingIdentity" } -/// A weak reference to a MeetingNotes document -/// -/// Allow a global singleton keep references to documents without incurring memory leaks as Documents are opened and -/// closed. -final class WeakMeetingNotesDocumentRef { - weak var value: MeetingNotesDocument? - - init(_ value: MeetingNotesDocument? = nil) { - self.value = value - } +/// A global actor for safely isolating the state updates for the DocumentSyncCoordinator +@globalActor +public actor SyncController { + public static let shared = SyncController() } /// A application-shared sync controller that supports coordinates documents and network connections with peers. +@MainActor public final class DocumentSyncCoordinator: ObservableObject { - var documents: [DocumentId: WeakMeetingNotesDocumentRef] = [:] + public static let shared = DocumentSyncCoordinator() + + var documents: [DocumentId: WeakDocumentRef] = [:] var txtRecords: [DocumentId: NWTXTRecord] = [:] var listeners: [DocumentId: NWListener] = [:] - @Published var listenerState: [DocumentId: NWListener.State] = [:] + + @Published public var listenerState: [DocumentId: NWListener.State] = [:] /// Looks up and returns a reference for a document for an initiated Peer Connection /// /// Primarily in order to attempt to send and receive sync updates. - func automergeDocument(for docId: DocumentId) -> Document? { - documents[docId]?.value?.doc + public func automergeDocument(for docId: DocumentId) -> Document? { + documents[docId]?.value } - func registerDocument(_ document: MeetingNotesDocument) { - documents[document.id] = WeakMeetingNotesDocumentRef(document) - + public func registerDocument(document: Automerge.Document, id: DocumentId? = nil) { + let documentId: DocumentId = id ?? 
DocumentId() + documents[documentId] = WeakDocumentRef(document) var txtRecord = NWTXTRecord() txtRecord[TXTRecordKeys.name] = name txtRecord[TXTRecordKeys.peer_id] = peerId.uuidString - txtRecord[TXTRecordKeys.doc_id] = document.id.description - txtRecords[document.id] = txtRecord + txtRecord[TXTRecordKeys.doc_id] = documentId.description + txtRecords[documentId] = txtRecord } - @Published var name: String { + @Published public var name: String { didSet { // update a listener, if running, with the new name. resetName(name) @@ -67,35 +55,35 @@ public final class DocumentSyncCoordinator: ObservableObject { } var browser: NWBrowser? - @Published var browserResults: [NWBrowser.Result] = [] - @Published var browserState: NWBrowser.State = .setup - var autoconnect: Bool + @Published public var browserResults: [NWBrowser.Result] = [] + @Published public var browserState: NWBrowser.State = .setup + public var autoconnect: Bool - @Published var connections: [SyncConnection] = [] + @Published public var connections: [BonjourSyncConnection] = [] func removeConnection(_ connectionId: UUID) { connections.removeAll { $0.connectionId == connectionId } } - @Published var listenerSetupError: Error? = nil - @Published var listenerStatusError: NWError? = nil + @Published public var listenerSetupError: (any Error)? = nil + @Published public var listenerStatusError: NWError? = nil let peerId = UUID() let syncQueue = DispatchQueue(label: "PeerSyncQueue") - var timerCancellable: Cancellable? + var timerCancellable: (any Cancellable)? var syncTrigger: PassthroughSubject = PassthroughSubject() - static func defaultSharingIdentity() -> String { + public static func defaultSharingIdentity() -> String { #if os(iOS) UIDevice().name #elseif os(macOS) - Host.current().localizedName ?? "MeetingNotes User" + Host.current().localizedName ?? "Automerge User" #endif } - public init() { + init() { self.name = UserDefaults.standard - .string(forKey: MeetingNotesDefaultKeys.sharingIdentity) ?? 
DocumentSyncCoordinator.defaultSharingIdentity() + .string(forKey: SynchronizerDefaultKeys.publicPeerName) ?? DocumentSyncCoordinator.defaultSharingIdentity() Logger.syncController.debug("SYNC CONTROLLER INIT, peer \(self.peerId.uuidString, privacy: .public)") #if os(iOS) autoconnect = true @@ -131,7 +119,7 @@ public final class DocumentSyncCoordinator: ObservableObject { // MARK: NWBrowser - func attemptToConnectToPeer(_ endpoint: NWEndpoint, forPeer peerId: String, withDoc documentId: DocumentId) { + public func attemptToConnectToPeer(_ endpoint: NWEndpoint, forPeer peerId: String, withDoc documentId: DocumentId) { Logger.syncController .debug( "Attempting to establish connection to \(peerId, privacy: .public) through \(endpoint.debugDescription, privacy: .public) " @@ -141,7 +129,7 @@ public final class DocumentSyncCoordinator: ObservableObject { }).isEmpty { Logger.syncController .debug("No connection stored for \(peerId, privacy: .public)") - let newConnection = SyncConnection( + let newConnection = BonjourSyncConnection( endpoint: endpoint, peerId: peerId, trigger: syncTrigger.eraseToAnyPublisher(), @@ -177,7 +165,7 @@ public final class DocumentSyncCoordinator: ObservableObject { using: browserNetworkParameters ) - newNetworkBrowser.stateUpdateHandler = { newState in + newNetworkBrowser.stateUpdateHandler = { @MainActor newState in switch newState { case let .failed(error): self.browserState = .failed(error) @@ -200,7 +188,7 @@ public final class DocumentSyncCoordinator: ObservableObject { } } - newNetworkBrowser.browseResultsChangedHandler = { [weak self] results, _ in + newNetworkBrowser.browseResultsChangedHandler = { @MainActor [weak self] results, _ in Logger.syncController.debug("browser update shows \(results.count, privacy: .public) result(s):") for res in results { Logger.syncController @@ -287,7 +275,7 @@ public final class DocumentSyncCoordinator: ObservableObject { type: P2PAutomergeSyncProtocol.bonjourType, txtRecord: txtRecordForDoc ) - 
listener.stateUpdateHandler = { [weak self] newState in + listener.stateUpdateHandler = { @MainActor [weak self] newState in self?.listenerState[documentId] = newState switch newState { case .ready: @@ -319,7 +307,7 @@ public final class DocumentSyncCoordinator: ObservableObject { // The system calls this when a new connection arrives at the listener. // Start the connection to accept it, or cancel to reject it. - listener.newConnectionHandler = { [weak self] newConnection in + listener.newConnectionHandler = { @MainActor [weak self] newConnection in Logger.syncController .debug( "Receiving connection request from \(newConnection.endpoint.debugDescription, privacy: .public)" @@ -337,7 +325,7 @@ public final class DocumentSyncCoordinator: ObservableObject { .info( "Endpoint not yet recorded, accepting connection from \(newConnection.endpoint.debugDescription, privacy: .public)" ) - let peerConnection = SyncConnection( + let peerConnection = BonjourSyncConnection( connection: newConnection, trigger: syncTrigger.eraseToAnyPublisher(), documentId: documentId @@ -405,3 +393,7 @@ public final class DocumentSyncCoordinator: ObservableObject { } } } + +// public extension DocumentSyncCoordinator { +// static let shared = DocumentSyncCoordinator() +// } diff --git a/Packages/automerge-repo/Sources/AutomergeRepo/Sync/ProtocolState.swift b/Packages/automerge-repo/Sources/AutomergeRepo/Sync/ProtocolState.swift new file mode 100644 index 00000000..05ab4e9a --- /dev/null +++ b/Packages/automerge-repo/Sources/AutomergeRepo/Sync/ProtocolState.swift @@ -0,0 +1,42 @@ +/// The state of a sync protocol connection. +public enum ProtocolState: String { + /// The connection that has been created but not yet connected + case setup + + /// The connection is established, waiting to successfully peer with the recipient. + case preparing + + /// The connection successfully peered and is ready for use. + case ready + + /// The connection is cancelled, failed, or terminated. 
+ case closed +} + +#if canImport(Network) +import class Network.NWConnection + +extension ProtocolState { + /// Translates a Network connection state into a protocol state + /// - Parameter connectState: The state of the network connection + /// - Returns: The corresponding protocol state + func from(_ connectState: NWConnection.State) -> Self { + switch connectState { + case .setup: + .setup + case .waiting: + .preparing + case .preparing: + .preparing + case .ready: + .ready + case .failed: + .closed + case .cancelled: + .closed + @unknown default: + fatalError() + } + } +} +#endif diff --git a/Packages/automerge-repo/Sources/AutomergeRepo/Sync/SyncV1Msg+Errors.swift b/Packages/automerge-repo/Sources/AutomergeRepo/Sync/SyncV1Msg+Errors.swift new file mode 100644 index 00000000..7d8343d5 --- /dev/null +++ b/Packages/automerge-repo/Sources/AutomergeRepo/Sync/SyncV1Msg+Errors.swift @@ -0,0 +1,36 @@ +import Foundation + +public extension SyncV1Msg { + enum Errors: Sendable { + public struct Timeout: Sendable, LocalizedError { + public var errorDescription: String = "Task timed out before completion" + } + + public struct SyncComplete: Sendable, LocalizedError { + public var errorDescription: String = "The synchronization process is complete" + } + + public struct ConnectionClosed: Sendable, LocalizedError { + public var errorDescription: String = "The websocket task was closed and/or nil" + } + + #warning("MOVE TO REPO ERRORS") + public struct InvalidURL: Sendable, LocalizedError { + public var urlString: String + public var errorDescription: String? { + "Invalid URL: \(urlString)" + } + } + + public struct UnexpectedMsg: Sendable, LocalizedError { + public var msg: MSG + public var errorDescription: String? 
{ + "Received an unexpected message: \(msg)" + } + } + + public struct DocumentUnavailable: Sendable, LocalizedError { + public var errorDescription: String = "The requested document isn't available" + } + } +} diff --git a/Packages/automerge-repo/Sources/AutomergeRepo/Sync/SyncV1Msg+encode+decode.swift b/Packages/automerge-repo/Sources/AutomergeRepo/Sync/SyncV1Msg+encode+decode.swift new file mode 100644 index 00000000..7879eff0 --- /dev/null +++ b/Packages/automerge-repo/Sources/AutomergeRepo/Sync/SyncV1Msg+encode+decode.swift @@ -0,0 +1,314 @@ +import Foundation // Data +import OSLog +import PotentCBOR + +public extension SyncV1Msg { + /// Attempts to decode the data you provide as a peer message. + /// + /// - Parameter data: The data to decode + /// - Returns: The decoded message, or ``SyncV1/unknown(_:)`` if the decoding attempt failed. + static func decodePeer(_ data: Data) -> SyncV1Msg { + if let peerMsg = attemptPeer(data) { + .peer(peerMsg) + } else { + .unknown(data) + } + } + + /// Decodes a Peer2Peer message data block using the message type you provide + /// - Parameters: + /// - data: The data to be decoded + /// - msgType: The type of message to decode. + /// - Returns: The decoded message. 
+ internal static func decode(_ data: Data, as msgType: P2PSyncMessageType) -> SyncV1Msg { + switch msgType { + case .unknown: + return .unknown(data) + case .sync: + if let msgData = attemptSync(data) { + return .sync(msgData) + } + case .id: + return .unknown(data) + + case .leave: + if let msgData = attemptLeave(data) { + return .leave(msgData) + } + case .peer: + if let msgData = attemptPeer(data) { + return .peer(msgData) + } + case .join: + if let msgData = attemptJoin(data) { + return .join(msgData) + } + case .request: + if let msgData = attemptRequest(data) { + return .request(msgData) + } + case .unavailable: + if let msgData = attemptUnavailable(data) { + return .unavailable(msgData) + } + case .ephemeral: + if let msgData = attemptEphemeral(data) { + return .ephemeral(msgData) + } + case .syncerror: + if let msgData = attemptError(data) { + return .error(msgData) + } + case .remoteHeadsChanged: + if let msgData = attemptRemoteHeadsChanged(data) { + return .remoteHeadsChanged(msgData) + } + case .remoteSubscriptionChange: + if let msgData = attemptRemoteSubscriptionChange(data) { + return .remoteSubscriptionChange(msgData) + } + } + return .unknown(data) + } + + /// Exhaustively attempt to decode incoming data as V1 protocol messages. + /// + /// - Parameters: + /// - data: The data to decode. + /// - withGossip: A Boolean value that indicates whether to include decoding of handshake messages. + /// - withHandshake: A Boolean value that indicates whether to include decoding of gossip messages. + /// - Returns: The decoded message, or ``SyncV1/unknown(_:)`` if the previous decoding attempts failed. + /// + /// The decoding is ordered from the perspective of an initiating client expecting a response to minimize attempts. + /// Enable `withGossip` to attempt to decode head gossip messages, and `withHandshake` to include handshake phase + /// messages. 
+ /// With both `withGossip` and `withHandshake` set to `true`, the decoding is exhaustive over all V1 messages. + static func decode(_ data: Data) -> SyncV1Msg { + var cborMsg: CBOR? = nil + + // attempt to deserialize CBOR message (in order to read the type from it) + do { + cborMsg = try CBORSerialization.cbor(from: data) + } catch { + Logger.webSocket.warning("Unable to CBOR decode incoming data: \(data)") + return .unknown(data) + } + // read the "type" of the message in order to choose the appropriate decoding path + guard let msgType = cborMsg?.mapValue?["type"]?.utf8StringValue else { + return .unknown(data) + } + + switch msgType { + case MsgTypes.peer: + if let peerMsg = attemptPeer(data) { + return .peer(peerMsg) + } + case MsgTypes.sync: + if let syncMsg = attemptSync(data) { + return .sync(syncMsg) + } + case MsgTypes.ephemeral: + if let ephemeralMsg = attemptEphemeral(data) { + return .ephemeral(ephemeralMsg) + } + case MsgTypes.error: + if let errorMsg = attemptError(data) { + return .error(errorMsg) + } + case MsgTypes.unavailable: + if let unavailableMsg = attemptUnavailable(data) { + return .unavailable(unavailableMsg) + } + case MsgTypes.join: + if let joinMsg = attemptJoin(data) { + return .join(joinMsg) + } + case MsgTypes.remoteHeadsChanged: + if let remoteHeadsChanged = attemptRemoteHeadsChanged(data) { + return .remoteHeadsChanged(remoteHeadsChanged) + } + case MsgTypes.request: + if let requestMsg = attemptRequest(data) { + return .request(requestMsg) + } + case MsgTypes.remoteSubscriptionChange: + if let remoteSubChangeMsg = attemptRemoteSubscriptionChange(data) { + return .remoteSubscriptionChange(remoteSubChangeMsg) + } + + default: + return .unknown(data) + } + return .unknown(data) + } + + // sync phase messages + + internal static func attemptSync(_ data: Data) -> SyncMsg? 
{ + do { + return try CBORCoder.decoder.decode(SyncMsg.self, from: data) + } catch { + Logger.webSocket.warning("Failed to decode data as SyncMsg") + } + return nil + } + + internal static func attemptRequest(_ data: Data) -> RequestMsg? { + do { + return try CBORCoder.decoder.decode(RequestMsg.self, from: data) + } catch { + Logger.webSocket.warning("Failed to decode data as RequestMsg") + } + return nil + } + + internal static func attemptUnavailable(_ data: Data) -> UnavailableMsg? { + do { + return try CBORCoder.decoder.decode(UnavailableMsg.self, from: data) + } catch { + Logger.webSocket.warning("Failed to decode data as UnavailableMsg") + } + return nil + } + + // handshake phase messages + + internal static func attemptPeer(_ data: Data) -> PeerMsg? { + do { + return try CBORCoder.decoder.decode(PeerMsg.self, from: data) + } catch { + Logger.webSocket.warning("Failed to decode data as PeerMsg") + } + return nil + } + + internal static func attemptJoin(_ data: Data) -> JoinMsg? { + do { + return try CBORCoder.decoder.decode(JoinMsg.self, from: data) + } catch { + Logger.webSocket.warning("Failed to decode data as JoinMsg") + } + return nil + } + + internal static func attemptLeave(_ data: Data) -> LeaveMsg? { + do { + return try CBORCoder.decoder.decode(LeaveMsg.self, from: data) + } catch { + Logger.webSocket.warning("Failed to decode data as LeaveMsg") + } + return nil + } + + // error + + internal static func attemptError(_ data: Data) -> ErrorMsg? { + do { + return try CBORCoder.decoder.decode(ErrorMsg.self, from: data) + } catch { + Logger.webSocket.warning("Failed to decode data as ErrorMsg") + } + return nil + } + + // ephemeral + + internal static func attemptEphemeral(_ data: Data) -> EphemeralMsg? 
{ + do { + return try CBORCoder.decoder.decode(EphemeralMsg.self, from: data) + } catch { + Logger.webSocket.warning("Failed to decode data as EphemeralMsg") + } + return nil + } + + // gossip + + internal static func attemptRemoteHeadsChanged(_ data: Data) -> RemoteHeadsChangedMsg? { + do { + return try CBORCoder.decoder.decode(RemoteHeadsChangedMsg.self, from: data) + } catch { + Logger.webSocket.warning("Failed to decode data as RemoteHeadsChangedMsg") + } + return nil + } + + internal static func attemptRemoteSubscriptionChange(_ data: Data) -> RemoteSubscriptionChangeMsg? { + do { + return try CBORCoder.decoder.decode(RemoteSubscriptionChangeMsg.self, from: data) + } catch { + Logger.webSocket.warning("Failed to decode data as RemoteSubscriptionChangeMsg") + } + return nil + } + + // encode messages + + static func encode(_ msg: JoinMsg) throws -> Data { + try CBORCoder.encoder.encode(msg) + } + + static func encode(_ msg: RequestMsg) throws -> Data { + try CBORCoder.encoder.encode(msg) + } + + static func encode(_ msg: LeaveMsg) throws -> Data { + try CBORCoder.encoder.encode(msg) + } + + static func encode(_ msg: SyncMsg) throws -> Data { + try CBORCoder.encoder.encode(msg) + } + + static func encode(_ msg: PeerMsg) throws -> Data { + try CBORCoder.encoder.encode(msg) + } + + static func encode(_ msg: UnavailableMsg) throws -> Data { + try CBORCoder.encoder.encode(msg) + } + + static func encode(_ msg: EphemeralMsg) throws -> Data { + try CBORCoder.encoder.encode(msg) + } + + static func encode(_ msg: RemoteSubscriptionChangeMsg) throws -> Data { + try CBORCoder.encoder.encode(msg) + } + + static func encode(_ msg: RemoteHeadsChangedMsg) throws -> Data { + try CBORCoder.encoder.encode(msg) + } + + static func encode(_ msg: ErrorMsg) throws -> Data { + try CBORCoder.encoder.encode(msg) + } + + static func encode(_ msg: SyncV1Msg) throws -> Data { + // not sure this is useful, but might as well finish out the set... 
+ switch msg { + case let .peer(peerMsg): + try CBORCoder.encoder.encode(peerMsg) + case let .join(joinMsg): + try CBORCoder.encoder.encode(joinMsg) + case let .leave(leaveMsg): + try CBORCoder.encoder.encode(leaveMsg) + case let .error(errorMsg): + try CBORCoder.encoder.encode(errorMsg) + case let .request(requestMsg): + try CBORCoder.encoder.encode(requestMsg) + case let .sync(syncMsg): + try CBORCoder.encoder.encode(syncMsg) + case let .unavailable(unavailableMsg): + try CBORCoder.encoder.encode(unavailableMsg) + case let .ephemeral(ephemeralMsg): + try CBORCoder.encoder.encode(ephemeralMsg) + case let .remoteSubscriptionChange(remoteSubscriptionChangeMsg): + try CBORCoder.encoder.encode(remoteSubscriptionChangeMsg) + case let .remoteHeadsChanged(remoteHeadsChangedMsg): + try CBORCoder.encoder.encode(remoteHeadsChangedMsg) + case let .unknown(data): + data + } + } +} diff --git a/Packages/automerge-repo/Sources/AutomergeRepo/Sync/SyncV1Msg+messages.swift b/Packages/automerge-repo/Sources/AutomergeRepo/Sync/SyncV1Msg+messages.swift new file mode 100644 index 00000000..53caceea --- /dev/null +++ b/Packages/automerge-repo/Sources/AutomergeRepo/Sync/SyncV1Msg+messages.swift @@ -0,0 +1,396 @@ +import Foundation + +public extension SyncV1Msg { + // - join - + // { + // type: "join", + // senderId: peer_id, + // supportedProtocolVersions: protocol_version + // ? metadata: peer_metadata, + // } + + // MARK: Join/Peer + + /// A message that indicates a desire to peer and sync documents. + /// + /// Sent by the initiating peer (represented by `senderId`) to initiate a connection to manage documents between + /// peers. + /// The next response is expected to be a ``PeerMsg``. If any other message is received after sending `JoinMsg`, the + /// initiating client should disconnect. + /// If the receiving peer receives any message other than a `JoinMsg` from the initiating peer, it is expected to + /// terminate the connection. 
+ struct JoinMsg: Sendable, Codable, CustomDebugStringConvertible { + public var type: String = SyncV1Msg.MsgTypes.join + public let senderId: PEER_ID + public var supportedProtocolVersions: String = "1" + public var peerMetadata: PeerMetadata? + + public init(senderId: PEER_ID, metadata: PeerMetadata? = nil) { + self.senderId = senderId + if let metadata { + self.peerMetadata = metadata + } + } + + public var debugDescription: String { + "JOIN[version: \(supportedProtocolVersions), sender: \(senderId), metadata: \(peerMetadata?.debugDescription ?? "nil")]" + } + } + + // - peer - (expected response to join) + // { + // type: "peer", + // senderId: peer_id, + // selectedProtocolVersion: protocol_version, + // targetId: peer_id, + // ? metadata: peer_metadata, + // } + + // example output from sync.automerge.org: + // { + // "type": "peer", + // "senderId": "storage-server-sync-automerge-org", + // "peerMetadata": {"storageId": "3760df37-a4c6-4f66-9ecd-732039a9385d", "isEphemeral": false}, + // "selectedProtocolVersion": "1", + // "targetId": "FA38A1B2-1433-49E7-8C3C-5F63C117DF09" + // } + + /// A message that acknowledges a join request. + /// + /// A response sent by a receiving peer (represented by `targetId`) after receiving a ``JoinMsg`` that indicates + /// sync, + /// gossiping, and ephemeral messages may now be initiated. + struct PeerMsg: Sendable, Codable, CustomDebugStringConvertible { + public var type: String = SyncV1Msg.MsgTypes.peer + public let senderId: PEER_ID + public let targetId: PEER_ID + public var peerMetadata: PeerMetadata? 
+        public var selectedProtocolVersion: String
+
+        public init(senderId: PEER_ID, targetId: PEER_ID, storageId: String?, ephemeral: Bool = true) {
+            self.senderId = senderId
+            self.targetId = targetId
+            self.selectedProtocolVersion = "1"
+            self.peerMetadata = PeerMetadata(storageId: storageId, isEphemeral: ephemeral)
+        }
+
+        public var debugDescription: String {
+            "PEER[version: \(selectedProtocolVersion), sender: \(senderId), target: \(targetId), metadata: \(peerMetadata?.debugDescription ?? "nil")]"
+        }
+    }
+
+    // - leave -
+    // {
+    //    type: "leave"
+    //    senderId: this.peerId
+    // }
+
+    /// A message sent by a peer (represented by `senderId`) to indicate it is disconnecting.
+    struct LeaveMsg: Sendable, Codable, CustomDebugStringConvertible {
+        public var type: String = SyncV1Msg.MsgTypes.leave
+        public let senderId: PEER_ID
+
+        public init(senderId: PEER_ID) {
+            self.senderId = senderId
+        }
+
+        public var debugDescription: String {
+            "LEAVE[sender: \(senderId)]"
+        }
+    }
+
+    // - error -
+    // {
+    //    type: "error",
+    //    message: str,
+    // }
+
+    /// A sync error message
+    struct ErrorMsg: Sendable, Codable, CustomDebugStringConvertible {
+        public var type: String = SyncV1Msg.MsgTypes.error
+        public let message: String
+
+        public init(message: String) {
+            self.message = message
+        }
+
+        public var debugDescription: String {
+            "ERROR[msg: \(message)]"
+        }
+    }
+
+    // MARK: Sync
+
+    // - request -
+    // {
+    //    type: "request",
+    //    documentId: document_id,
+    //    ; The peer requesting to begin sync
+    //    senderId: peer_id,
+    //    targetId: peer_id,
+    //    ; The initial automerge sync message from the sender
+    //    data: sync_message
+    // }
+
+    /// A request to synchronize an Automerge document.
+    ///
+    /// Sent when the initiating peer (represented by `senderId`) is asking to begin sync for the given document ID.
+    /// Identical to ``SyncMsg`` but indicates to the receiving peer that the sender would like an ``UnavailableMsg``
+    /// message if the receiving peer (represented by `targetId`) does not have the document (identified by
+    /// `documentId`).
+ struct RequestMsg: Sendable, Codable, CustomDebugStringConvertible { + public var type: String = SyncV1Msg.MsgTypes.request + public let documentId: MSG_DOCUMENT_ID + public let senderId: PEER_ID // The peer requesting to begin sync + public let targetId: PEER_ID + public let data: Data // The initial automerge sync message from the sender + + public init(documentId: MSG_DOCUMENT_ID, senderId: PEER_ID, targetId: PEER_ID, sync_message: Data) { + self.documentId = documentId + self.senderId = senderId + self.targetId = targetId + self.data = sync_message + } + + public var debugDescription: String { + "REQUEST[documentId: \(documentId), sender: \(senderId), target: \(targetId), data: \(data.count) bytes]" + } + } + + // - sync - + // { + // type: "sync", + // documentId: document_id, + // ; The peer requesting to begin sync + // senderId: peer_id, + // targetId: peer_id, + // ; The initial automerge sync message from the sender + // data: sync_message + // } + + /// A request to synchronize an Automerge document. + /// + /// Sent when the initiating peer (represented by `senderId`) is asking to begin sync for the given document ID. + /// Use `SyncMsg` instead of `RequestMsg` when you are creating a new Automerge document that you want to share. + /// + /// If the receiving peer doesn't have an Automerge document represented by `documentId` and can't or won't store + /// the + /// document. 
+ struct SyncMsg: Sendable, Codable, CustomDebugStringConvertible { + public var type = SyncV1Msg.MsgTypes.sync + public let documentId: MSG_DOCUMENT_ID + public let senderId: PEER_ID // The peer requesting to begin sync + public let targetId: PEER_ID + public let data: Data // The initial automerge sync message from the sender + + public init(documentId: MSG_DOCUMENT_ID, senderId: PEER_ID, targetId: PEER_ID, sync_message: Data) { + self.documentId = documentId + self.senderId = senderId + self.targetId = targetId + self.data = sync_message + } + + public var debugDescription: String { + "SYNC[documentId: \(documentId), sender: \(senderId), target: \(targetId), data: \(data.count) bytes]" + } + } + + // - unavailable - + // { + // type: "doc-unavailable", + // senderId: peer_id, + // targetId: peer_id, + // documentId: document_id, + // } + + /// A message that indicates a document is unavailable. + /// + /// Generally a response for a ``RequestMsg`` from an initiating peer (represented by `senderId`) that the receiving + /// peer (represented by `targetId`) doesn't have a copy of the requested Document, or is unable to share it. 
+ struct UnavailableMsg: Sendable, Codable, CustomDebugStringConvertible { + public var type = SyncV1Msg.MsgTypes.unavailable + public let documentId: MSG_DOCUMENT_ID + public let senderId: PEER_ID + public let targetId: PEER_ID + + public init(documentId: MSG_DOCUMENT_ID, senderId: PEER_ID, targetId: PEER_ID) { + self.documentId = documentId + self.senderId = senderId + self.targetId = targetId + } + + public var debugDescription: String { + "UNAVAILABLE[documentId: \(documentId), sender: \(senderId), target: \(targetId)]" + } + } + + // MARK: Ephemeral + + // - ephemeral - + // { + // type: "ephemeral", + // ; The peer who sent this message + // senderId: peer_id, + // ; The target of this message + // targetId: peer_id, + // ; The sequence number of this message within its session + // count: uint, + // ; The unique session identifying this stream of ephemeral messages + // sessionId: str, + // ; The document ID this ephemera relates to + // documentId: document_id, + // ; The data of this message (in practice this is arbitrary CBOR) + // data: bstr + // } + + struct EphemeralMsg: Sendable, Codable, CustomDebugStringConvertible { + public var type = SyncV1Msg.MsgTypes.ephemeral + public let senderId: PEER_ID + public let targetId: PEER_ID + public let count: UInt + public let sessionId: String + public let documentId: MSG_DOCUMENT_ID + public let data: Data + + public init( + senderId: PEER_ID, + targetId: PEER_ID, + count: UInt, + sessionId: String, + documentId: MSG_DOCUMENT_ID, + data: Data + ) { + self.senderId = senderId + self.targetId = targetId + self.count = count + self.sessionId = sessionId + self.documentId = documentId + self.data = data + } + + public var debugDescription: String { + "EPHEMERAL[documentId: \(documentId), sender: \(senderId), target: \(targetId), count: \(count), sessionId: \(sessionId), data: \(data.count) bytes]" + } + } + + // MARK: Head's Gossiping + + // - remote subscription changed - + // { + // type: 
"remote-subscription-change" + // senderId: peer_id + // targetId: peer_id + // + // ; The storage IDs to add to the subscription + // ? add: [* storage_id] + // + // ; The storage IDs to remove from the subscription + // remove: [* storage_id] + // } + + struct RemoteSubscriptionChangeMsg: Sendable, Codable, CustomDebugStringConvertible { + public var type = SyncV1Msg.MsgTypes.remoteSubscriptionChange + public let senderId: PEER_ID + public let targetId: PEER_ID + public var add: [STORAGE_ID]? + public var remove: [STORAGE_ID] + + public init(senderId: PEER_ID, targetId: PEER_ID, add: [STORAGE_ID]? = nil, remove: [STORAGE_ID]) { + self.senderId = senderId + self.targetId = targetId + self.add = add + self.remove = remove + } + + public var debugDescription: String { + var returnString = "REMOTE_SUBSCRIPTION_CHANGE[sender: \(senderId), target: \(targetId)]" + if let add { + returnString.append("\n add: [") + returnString.append(add.joined(separator: ",")) + returnString.append("]") + } + returnString.append("\n remove: [") + returnString.append(remove.joined(separator: ",")) + returnString.append("]") + return returnString + } + } + + // - remote heads changed + // { + // type: "remote-heads-changed" + // senderId: peer_id + // targetId: peer_id + // + // ; The document ID of the document that has changed + // documentId: document_id + // + // ; A map from storage ID to the heads advertised for a given storage ID + // newHeads: { + // * storage_id => { + // ; The heads of the new document for the given storage ID as + // ; a list of base64 encoded SHA2 hashes + // heads: [* string] + // ; The local time on the node which initially sent the remote-heads-changed + // ; message as milliseconds since the unix epoch + // timestamp: uint + // } + // } + // } + + struct RemoteHeadsChangedMsg: Sendable, Codable, CustomDebugStringConvertible { + public struct HeadsAtTime: Codable, CustomDebugStringConvertible, Sendable { + public var heads: [String] + public let timestamp: 
UInt
+
+            public init(heads: [String], timestamp: UInt) {
+                self.heads = heads
+                self.timestamp = timestamp
+            }
+
+            public var debugDescription: String {
+                "\(timestamp):[\(heads.joined(separator: ","))]"
+            }
+        }
+
+        public var type = SyncV1Msg.MsgTypes.remoteHeadsChanged
+        public let senderId: PEER_ID
+        public let targetId: PEER_ID
+        public let documentId: MSG_DOCUMENT_ID
+        public var newHeads: [STORAGE_ID: HeadsAtTime]
+        public var add: [STORAGE_ID]
+        public var remove: [STORAGE_ID]
+
+        public init(
+            senderId: PEER_ID,
+            targetId: PEER_ID,
+            documentId: MSG_DOCUMENT_ID,
+            newHeads: [STORAGE_ID: HeadsAtTime],
+            add: [STORAGE_ID],
+            remove: [STORAGE_ID]
+        ) {
+            self.senderId = senderId
+            self.targetId = targetId
+            self.documentId = documentId
+            self.newHeads = newHeads
+            self.add = add
+            self.remove = remove
+        }
+
+        public var debugDescription: String {
+            var returnString =
+                "REMOTE_HEADS_CHANGED[documentId: \(documentId), sender: \(senderId), target: \(targetId)]"
+            returnString.append("\n heads:")
+            for (storage_id, headsAtTime) in newHeads {
+                returnString.append("\n \(storage_id) : \(headsAtTime.debugDescription)")
+            }
+            returnString.append("\n add: [")
+            returnString.append(add.joined(separator: ", "))
+            returnString.append("]")
+
+            returnString.append("\n remove: [")
+            returnString.append(remove.joined(separator: ", "))
+            returnString.append("]")
+            return returnString
+        }
+    }
+}
diff --git a/Packages/automerge-repo/Sources/AutomergeRepo/Sync/SyncV1Msg.swift b/Packages/automerge-repo/Sources/AutomergeRepo/Sync/SyncV1Msg.swift
new file mode 100644
index 00000000..37052361
--- /dev/null
+++ b/Packages/automerge-repo/Sources/AutomergeRepo/Sync/SyncV1Msg.swift
@@ -0,0 +1,118 @@
+//
+// SyncV1Msg.swift
+// MeetingNotes
+//
+// Created by Joseph Heck on 1/24/24.
+// + +import Foundation +import OSLog +import PotentCBOR + +// Automerge Repo WebSocket sync details: +// https://github.com/automerge/automerge-repo/blob/main/packages/automerge-repo-network-websocket/README.md +// explicitly using a protocol version "1" here - make sure to specify (and verify?) that + +// related source for the automerge-repo sync code: +// https://github.com/automerge/automerge-repo/blob/main/packages/automerge-repo-network-websocket/src/BrowserWebSocketClientAdapter.ts +// All the WebSocket messages are CBOR encoded and sent as data streams + +/// A type that encapsulates valid V1 Automerge-repo sync protocol messages. +public indirect enum SyncV1Msg: Sendable { + // CDDL pre-amble + // ; The base64 encoded bytes of a Peer ID + // peer_id = str + // ; The base64 encoded bytes of a Storage ID + // storage_id = str + // ; The possible protocol versions (currently always the string "1") + // protocol_version = "1" + // ; The bytes of an automerge sync message + // sync_message = bstr + // ; The base58check encoded bytes of a document ID + // document_id = str + + /// The collection of value "type" strings for the V1 automerge-repo protocol. 
+ public enum MsgTypes: Sendable { + public static let peer = "peer" + public static let join = "join" + public static let leave = "leave" + public static let request = "request" + public static let sync = "sync" + public static let ephemeral = "ephemeral" + public static let error = "error" + public static let unavailable = "doc-unavailable" + public static let remoteHeadsChanged = "remote-heads-changed" + public static let remoteSubscriptionChange = "remote-subscription-change" + } + + case peer(PeerMsg) + case join(JoinMsg) + case leave(LeaveMsg) + case error(ErrorMsg) + case request(RequestMsg) + case sync(SyncMsg) + case unavailable(UnavailableMsg) + // ephemeral + case ephemeral(EphemeralMsg) + // gossip additions + case remoteSubscriptionChange(RemoteSubscriptionChangeMsg) + case remoteHeadsChanged(RemoteHeadsChangedMsg) + // fall-through scenario - unknown message + case unknown(Data) + + var peerMessageType: P2PSyncMessageType { + switch self { + case .peer: + P2PSyncMessageType.peer + case .join: + P2PSyncMessageType.join + case .leave: + P2PSyncMessageType.leave + case .error: + P2PSyncMessageType.syncerror + case .request: + P2PSyncMessageType.request + case .sync: + P2PSyncMessageType.sync + case .unavailable: + P2PSyncMessageType.unavailable + case .ephemeral: + P2PSyncMessageType.ephemeral + case .remoteSubscriptionChange: + P2PSyncMessageType.remoteSubscriptionChange + case .remoteHeadsChanged: + P2PSyncMessageType.remoteHeadsChanged + case .unknown: + P2PSyncMessageType.unknown + } + } +} + +extension SyncV1Msg: CustomDebugStringConvertible { + public var debugDescription: String { + switch self { + case let .peer(interior_msg): + interior_msg.debugDescription + case let .join(interior_msg): + interior_msg.debugDescription + case let .leave(interior_msg): + interior_msg.debugDescription + case let .error(interior_msg): + interior_msg.debugDescription + case let .request(interior_msg): + interior_msg.debugDescription + case let .sync(interior_msg): 
+ interior_msg.debugDescription + case let .unavailable(interior_msg): + interior_msg.debugDescription + case let .ephemeral(interior_msg): + interior_msg.debugDescription + case let .remoteSubscriptionChange(interior_msg): + interior_msg.debugDescription + case let .remoteHeadsChanged(interior_msg): + interior_msg.debugDescription + case let .unknown(data): + "UNKNOWN[data: \(data.hexEncodedString(uppercase: false))]" + } + } +} diff --git a/Packages/automerge-repo/Sources/AutomergeRepo/WeakDocumentRef.swift b/Packages/automerge-repo/Sources/AutomergeRepo/WeakDocumentRef.swift new file mode 100644 index 00000000..bb72266d --- /dev/null +++ b/Packages/automerge-repo/Sources/AutomergeRepo/WeakDocumentRef.swift @@ -0,0 +1,13 @@ +import class Automerge.Document + +/// A weak reference to an Automerge document +/// +/// Allow a global singleton keep references to documents without incurring memory leaks as Documents are opened and +/// closed. +final class WeakDocumentRef { + weak var value: Automerge.Document? + + init(_ value: Automerge.Document? = nil) { + self.value = value + } +} diff --git a/Packages/automerge-repo/Sources/AutomergeRepo/extensions/Data+hexEncodedString.swift b/Packages/automerge-repo/Sources/AutomergeRepo/extensions/Data+hexEncodedString.swift new file mode 100644 index 00000000..b260ec95 --- /dev/null +++ b/Packages/automerge-repo/Sources/AutomergeRepo/extensions/Data+hexEncodedString.swift @@ -0,0 +1,15 @@ +import struct Foundation.Data + +public extension Data { + /// Returns the data as a hex-encoded string. + /// - Parameter uppercase: A Boolean value that indicates whether the hex encoded string uses uppercase letters. + func hexEncodedString(uppercase: Bool = false) -> String { + let format = uppercase ? "%02hhX" : "%02hhx" + return map { String(format: format, $0) }.joined() + } + + /// The data as an array of bytes. 
+ var bytes: [UInt8] { // fancy pretty call: myData.bytes -> [UInt8] + [UInt8](self) + } +} diff --git a/Packages/automerge-repo/Sources/AutomergeRepo/extensions/OSLog+extensions.swift b/Packages/automerge-repo/Sources/AutomergeRepo/extensions/OSLog+extensions.swift new file mode 100644 index 00000000..36e40dc6 --- /dev/null +++ b/Packages/automerge-repo/Sources/AutomergeRepo/extensions/OSLog+extensions.swift @@ -0,0 +1,32 @@ +import OSLog + +extension Logger: @unchecked Sendable {} +// https://forums.developer.apple.com/forums/thread/747816?answerId=781922022#781922022 +// Per Quinn: +// `Logger` should be sendable. Under the covers, it’s an immutable struct with a single +// OSLog property, and that in turn is just a wrapper around the C os_log_t which is +// definitely thread safe. +#if swift(>=6.0) +#warning("Reevaluate whether this decoration is necessary.") +#endif + +extension Logger { + /// Using your bundle identifier is a great way to ensure a unique identifier. + private static let subsystem = Bundle.main.bundleIdentifier! + + /// Logs updates and interaction related to watching for external peer systems. + static let syncController = Logger(subsystem: subsystem, category: "SyncController") + + /// Logs updates and interaction related to the process of synchronization over the network. + static let syncConnection = Logger(subsystem: subsystem, category: "SyncConnection") + + /// Logs updates and interaction related to the process of synchronization over the network. + static let webSocket = Logger(subsystem: subsystem, category: "WebSocket") + + /// Logs updates and interaction related to the process of synchronization over the network. 
+ static let storage = Logger(subsystem: subsystem, category: "storageSubsystem") + + static let repo = Logger(subsystem: subsystem, category: "automerge-repo") + + static let network = Logger(subsystem: subsystem, category: "networkSubsystem") +} diff --git a/MeetingNotes/String+hexEncoding.swift b/Packages/automerge-repo/Sources/AutomergeRepo/extensions/String+hexEncoding.swift similarity index 85% rename from MeetingNotes/String+hexEncoding.swift rename to Packages/automerge-repo/Sources/AutomergeRepo/extensions/String+hexEncoding.swift index 312d0533..bab90b6f 100644 --- a/MeetingNotes/String+hexEncoding.swift +++ b/Packages/automerge-repo/Sources/AutomergeRepo/extensions/String+hexEncoding.swift @@ -1,4 +1,4 @@ -import Foundation +import struct Foundation.Data // https://stackoverflow.com/a/56870030/19477 // Licensed: CC BY-SA 4.0 for [itMaxence](https://stackoverflow.com/users/3328736/itmaxence) @@ -31,21 +31,16 @@ extension String { // "5413".data(using: .hexadecimal) // "0x1234FF".data(using: .hexadecimal) -extension Data { - var bytes: [UInt8] { // fancy pretty call: myData.bytes -> [UInt8] - [UInt8](self) - } - - // Could make a more optimized one~ - func hexa(prefixed isPrefixed: Bool = true) -> String { - self.bytes.reduce(isPrefixed ? "0x" : "") { $0 + String(format: "%02X", $1) } - } -} - +// extension Data { +// Could make a more optimized one~ +// func hexa(prefixed isPrefixed: Bool = true) -> String { +// self.bytes.reduce(isPrefixed ? "0x" : "") { $0 + String(format: "%02X", $1) } +// } // print("000204ff5400".data(using: .hexadecimal)?.hexa() ?? "failed") // OK // print("0x000204ff5400".data(using: .hexadecimal)?.hexa() ?? "failed") // OK // print("541".data(using: .hexadecimal)?.hexa() ?? "failed") // fails // print("5413".data(using: .hexadecimal)?.hexa() ?? 
"failed") // OK +// } // https://stackoverflow.com/a/73731660/19477 // Licensed: CC BY-SA 4.0 for [Nick](https://stackoverflow.com/users/392986/nick) diff --git a/MeetingNotes/Sync/PeerNetworking/TimeInterval+milliseconds.swift b/Packages/automerge-repo/Sources/AutomergeRepo/extensions/TimeInterval+milliseconds.swift similarity index 86% rename from MeetingNotes/Sync/PeerNetworking/TimeInterval+milliseconds.swift rename to Packages/automerge-repo/Sources/AutomergeRepo/extensions/TimeInterval+milliseconds.swift index eab6ca5f..cd1061b5 100644 --- a/MeetingNotes/Sync/PeerNetworking/TimeInterval+milliseconds.swift +++ b/Packages/automerge-repo/Sources/AutomergeRepo/extensions/TimeInterval+milliseconds.swift @@ -1,4 +1,4 @@ -import Foundation +import struct Foundation.TimeInterval extension TimeInterval { /// Returns a time interval from the number of milliseconds you provide. diff --git a/Packages/automerge-repo/Sources/AutomergeRepo/extensions/UUID+bs58String.swift b/Packages/automerge-repo/Sources/AutomergeRepo/extensions/UUID+bs58String.swift new file mode 100644 index 00000000..0d63db2e --- /dev/null +++ b/Packages/automerge-repo/Sources/AutomergeRepo/extensions/UUID+bs58String.swift @@ -0,0 +1,32 @@ +import Base58Swift +import struct Foundation.Data +import struct Foundation.UUID + +public extension UUID { + /// The contents of the UUID as data. + var data: Data { + var byteblob = Data(count: 16) + byteblob[0] = self.uuid.0 + byteblob[1] = self.uuid.1 + byteblob[2] = self.uuid.2 + byteblob[3] = self.uuid.3 + byteblob[4] = self.uuid.4 + byteblob[5] = self.uuid.5 + byteblob[6] = self.uuid.6 + byteblob[7] = self.uuid.7 + byteblob[8] = self.uuid.8 + byteblob[9] = self.uuid.9 + byteblob[10] = self.uuid.10 + byteblob[11] = self.uuid.11 + byteblob[12] = self.uuid.12 + byteblob[13] = self.uuid.13 + byteblob[14] = self.uuid.14 + byteblob[15] = self.uuid.15 + return byteblob + } + + /// The contents of UUID as a BS58 encoded string. 
+ var bs58String: String { + Base58.base58CheckEncode(self.data.bytes) + } +} diff --git a/Packages/automerge-repo/Tests/AutomergeRepoTests/BS58IdTests.swift b/Packages/automerge-repo/Tests/AutomergeRepoTests/BS58IdTests.swift new file mode 100644 index 00000000..6c0f8c23 --- /dev/null +++ b/Packages/automerge-repo/Tests/AutomergeRepoTests/BS58IdTests.swift @@ -0,0 +1,44 @@ +@testable import AutomergeRepo +import Base58Swift +import XCTest + +final class BS58IdTests: XCTestCase { + func testDataLengthUUIDandAutomergeID() throws { + let exampleUUID = UUID() + let bytes: Data = exampleUUID.data + // example from AutomergeRepo docs/blog + // https://automerge.org/blog/2023/11/06/automerge-repo/ + // let full = "automerge:2j9knpCseyhnK8izDmLpGP5WMdZQ" + let partial = "2j9knpCseyhnK8izDmLpGP5WMdZQ" + XCTAssertEqual(Base58.base58Decode(partial)?.count, 20) + if let decodedBytes = Base58.base58CheckDecode(partial) { + // both are 16 bytes of data + XCTAssertEqual(bytes.count, Data(decodedBytes).count) + } + } + + func testDisplayingUUIDWithBase58() throws { + let exampleUUID = try XCTUnwrap(UUID(uuidString: "1654A0B5-43B9-48FF-B7FB-83F58F4D1D75")) + // print("hexencoded: \(exampleUUID.data.hexEncodedString())") + XCTAssertEqual("1654a0b543b948ffb7fb83f58f4d1d75", exampleUUID.data.hexEncodedString()) + let bs58Converted = Base58.base58CheckEncode(exampleUUID.data.bytes) + // print("Converted: \(bs58Converted)") + XCTAssertEqual("K3YptshN5CcFZNpnnXcStizSNPU", bs58Converted) + XCTAssertEqual(exampleUUID.bs58String, bs58Converted) + } + + func testDataInAndOutWithBase58() throws { + // let full = "automerge:2j9knpCseyhnK8izDmLpGP5WMdZQ" + let partial = "2j9knpCseyhnK8izDmLpGP5WMdZQ" + if let decodedBytes = Base58.base58CheckDecode(partial) { + print(decodedBytes.count) + // AutomergeID is 16 bytes of data + XCTAssertEqual(16, Data(decodedBytes).count) + XCTAssertEqual("7bf18580944c450ea740c1f23be047ca", Data(decodedBytes).hexEncodedString()) + // 
print(Data(decodedBytes).hexEncodedString()) + + let reversed = Base58.base58CheckEncode(decodedBytes) + XCTAssertEqual(reversed, partial) + } + } +} diff --git a/Packages/automerge-repo/Tests/AutomergeRepoTests/BaseRepoTests.swift b/Packages/automerge-repo/Tests/AutomergeRepoTests/BaseRepoTests.swift new file mode 100644 index 00000000..a03da444 --- /dev/null +++ b/Packages/automerge-repo/Tests/AutomergeRepoTests/BaseRepoTests.swift @@ -0,0 +1,138 @@ +import Automerge +@testable import AutomergeRepo +import AutomergeUtilities +import XCTest + +final class BaseRepoTests: XCTestCase { + var repo: Repo! + + override func setUp() async throws { + repo = Repo(sharePolicy: SharePolicies.agreeable) + } + + func testMostBasicRepoStartingPoints() async throws { + // Repo + // property: peers [PeerId] - all (currently) connected peers + let peers = await repo.peers() + XCTAssertEqual(peers, []) + + // let peerId = await repo.peerId + // print(peerId) + + // - func storageId() -> StorageId (async) + let storageId = await repo.storageId() + XCTAssertNil(storageId) + + let knownIds = await repo.documentIds() + XCTAssertEqual(knownIds, []) + } + + func testCreate() async throws { + let newDoc = try await repo.create() + XCTAssertNotNil(newDoc) + let knownIds = await repo.documentIds() + XCTAssertEqual(knownIds.count, 1) + } + + func testCreateWithId() async throws { + let myId = DocumentId() + let handle = try await repo.create(id: myId) + XCTAssertEqual(myId, handle.id) + + let knownIds = await repo.documentIds() + XCTAssertEqual(knownIds.count, 1) + XCTAssertEqual(knownIds[0], myId) + } + + func testCreateWithExistingDoc() async throws { + let handle = try await repo.create(doc: Document()) + var knownIds = await repo.documentIds() + XCTAssertEqual(knownIds.count, 1) + XCTAssertEqual(knownIds[0], handle.id) + + let myId = DocumentId() + let _ = try await repo.create(doc: Document(), id: myId) + knownIds = await repo.documentIds() + XCTAssertEqual(knownIds.count, 2) + } + + 
func testFind() async throws { + let myId = DocumentId() + let handle = try await repo.create(id: myId) + XCTAssertEqual(myId, handle.id) + + let foundDoc = try await repo.find(id: myId) + XCTAssertEqual(foundDoc.doc.actor, handle.doc.actor) + } + + func testFindFailed() async throws { + do { + let _ = try await repo.find(id: DocumentId()) + XCTFail() + } catch {} + } + + func testDelete() async throws { + let myId = DocumentId() + let _ = try await repo.create(id: myId) + var knownIds = await repo.documentIds() + XCTAssertEqual(knownIds.count, 1) + + try await repo.delete(id: myId) + knownIds = await repo.documentIds() + XCTAssertEqual(knownIds.count, 0) + + do { + let _ = try await repo.find(id: DocumentId()) + XCTFail() + } catch {} + } + + func testClone() async throws { + let myId = DocumentId() + let handle = try await repo.create(id: myId) + XCTAssertEqual(myId, handle.id) + + let clonedHandle = try await repo.clone(id: myId) + XCTAssertNotEqual(handle.id, clonedHandle.id) + XCTAssertNotEqual(handle.doc.actor, clonedHandle.doc.actor) + + let knownIds = await repo.documentIds() + XCTAssertEqual(knownIds.count, 2) + } + + func testExportFailureUnknownId() async throws { + do { + _ = try await repo.export(id: DocumentId()) + XCTFail() + } catch {} + } + + func testExport() async throws { + let newDoc = try RepoHelpers.documentWithData() + let newHandle = try await repo.create(doc: newDoc) + + let exported = try await repo.export(id: newHandle.id) + XCTAssertEqual(exported, newDoc.save()) + } + + func testImport() async throws { + let newDoc = try RepoHelpers.documentWithData() + + let handle = try await repo.import(data: newDoc.save()) + XCTAssertTrue(RepoHelpers.equalContents(doc1: handle.doc, doc2: newDoc)) + } + + // TBD: + // - func storageIdForPeer(peerId) -> StorageId + // - func subscribeToRemotes([StorageId]) + + func testRepoSetup() async throws { + let repoA = Repo(sharePolicy: SharePolicies.agreeable) + let storage = await InMemoryStorage() + await 
repoA.addStorageProvider(storage) + + let storageId = await repoA.storageId() + XCTAssertNotNil(storageId) + } +} diff --git a/MeetingNotesTests/CBORExperiments.swift b/Packages/automerge-repo/Tests/AutomergeRepoTests/CBORExperiments.swift similarity index 75% rename from MeetingNotesTests/CBORExperiments.swift rename to Packages/automerge-repo/Tests/AutomergeRepoTests/CBORExperiments.swift index 65f49ded..d302c8d1 100644 --- a/MeetingNotesTests/CBORExperiments.swift +++ b/Packages/automerge-repo/Tests/AutomergeRepoTests/CBORExperiments.swift @@ -1,10 +1,4 @@ -// -// CBORExperiments.swift -// MeetingNotesTests -// -// Created by Joseph Heck on 1/28/24. -// - +import AutomergeRepo import PotentCBOR import XCTest @@ -37,6 +31,15 @@ final class CBORExperiments: XCTestCase { static let encoder = CBOREncoder() static let decoder = CBORDecoder() + func testCBORSerialization() throws { + let peerMsg = SyncV1Msg.PeerMsg(senderId: "senderUUID", targetId: "targetUUID", storageId: "something") + let encodedPeerMsg = try SyncV1Msg.encode(peerMsg) + + let x = try CBORSerialization.cbor(from: encodedPeerMsg) + XCTAssertEqual(x.mapValue?["type"]?.utf8StringValue, SyncV1Msg.MsgTypes.peer) + // print("CBOR data: \(x)") + } + func testDecodingWithAdditionalData() throws { let data = try Self.encoder.encode(ExtendedMessage( first: "one", diff --git a/Packages/automerge-repo/Tests/AutomergeRepoTests/DocHandleTests.swift b/Packages/automerge-repo/Tests/AutomergeRepoTests/DocHandleTests.swift new file mode 100644 index 00000000..fd652668 --- /dev/null +++ b/Packages/automerge-repo/Tests/AutomergeRepoTests/DocHandleTests.swift @@ -0,0 +1,57 @@ +import Automerge +@testable import AutomergeRepo +import XCTest + +final class DocHandleTests: XCTestCase { + func testNewDocHandleData() async throws { + let id = DocumentId() + let new = InternalDocHandle(id: id, isNew: true) + + XCTAssertEqual(new.id, id) + XCTAssertEqual(new.state, .idle) + XCTAssertEqual(new.isDeleted, false) + 
XCTAssertEqual(new.isReady, false) + XCTAssertEqual(new.isUnavailable, false) + XCTAssertEqual(new.remoteHeads.count, 0) + XCTAssertNil(new.doc) + } + + func testNewDocHandleDataWithDocument() async throws { + let id = DocumentId() + let new = InternalDocHandle(id: id, isNew: true, initialValue: Document()) + + XCTAssertEqual(new.id, id) + XCTAssertEqual(new.state, .loading) + XCTAssertEqual(new.isDeleted, false) + XCTAssertEqual(new.isReady, false) + XCTAssertEqual(new.isUnavailable, false) + XCTAssertEqual(new.remoteHeads.count, 0) + XCTAssertNotNil(new.doc) + } + + func testDocHandleRequestData() async throws { + let id = DocumentId() + let new = InternalDocHandle(id: id, isNew: false) + + XCTAssertEqual(new.id, id) + XCTAssertEqual(new.state, .idle) + XCTAssertEqual(new.isDeleted, false) + XCTAssertEqual(new.isReady, false) + XCTAssertEqual(new.isUnavailable, false) + XCTAssertEqual(new.remoteHeads.count, 0) + XCTAssertNil(new.doc) + } + + func testDocHandleRequestDataWithData() async throws { + let id = DocumentId() + let new = InternalDocHandle(id: id, isNew: false, initialValue: Document()) + + XCTAssertEqual(new.id, id) + XCTAssertEqual(new.state, .ready) + XCTAssertEqual(new.isDeleted, false) + XCTAssertEqual(new.isReady, true) + XCTAssertEqual(new.isUnavailable, false) + XCTAssertEqual(new.remoteHeads.count, 0) + XCTAssertNotNil(new.doc) + } +} diff --git a/Packages/automerge-repo/Tests/AutomergeRepoTests/DocumentIdTests.swift b/Packages/automerge-repo/Tests/AutomergeRepoTests/DocumentIdTests.swift new file mode 100644 index 00000000..28e164af --- /dev/null +++ b/Packages/automerge-repo/Tests/AutomergeRepoTests/DocumentIdTests.swift @@ -0,0 +1,52 @@ +@testable import AutomergeRepo +import Base58Swift +import XCTest + +final class DocumentIdTests: XCTestCase { + func testInvalidDocumentIdString() async throws { + XCTAssertNil(DocumentId("some random string")) + } + + func testDocumentId() async throws { + let someUUID = UUID() + let id = 
DocumentId(someUUID) + XCTAssertEqual(id.description, someUUID.bs58String) + } + + func testDocumentIdFromString() async throws { + let someUUID = UUID() + let bs58String = someUUID.bs58String + let id = DocumentId(bs58String) + XCTAssertEqual(id?.description, bs58String) + + let invalidOptionalString: String? = "SomeRandomNonBS58String" + XCTAssertNil(DocumentId(invalidOptionalString)) + + let invalidString = "SomeRandomNonBS58String" + XCTAssertNil(DocumentId(invalidString)) + + let optionalString: String? = bs58String + XCTAssertEqual(DocumentId(optionalString)?.description, bs58String) + + XCTAssertNil(DocumentId(nil)) + } + + func testInvalidTooMuchDataDocumentId() async throws { + let tooBig = [UInt8](UUID().data + UUID().data) + let bs58StringFromData = Base58.base58CheckEncode(tooBig) + let tooLargeOptionalString: String? = bs58StringFromData + XCTAssertNil(DocumentId(bs58StringFromData)) + XCTAssertNil(DocumentId(tooLargeOptionalString)) + + let optionalString: String? = bs58StringFromData + XCTAssertNil(DocumentId(optionalString)) + } + + func testComparisonOnData() async throws { + let first = DocumentId() + let second = DocumentId() + let compareFirstAndSecond = first < second + let compareFirstAndSecondDescription = first.description < second.description + XCTAssertEqual(compareFirstAndSecond, compareFirstAndSecondDescription) + } +} diff --git a/Packages/automerge-repo/Tests/AutomergeRepoTests/IntegrationTests/RepoWebsocketIntegrationTests.swift b/Packages/automerge-repo/Tests/AutomergeRepoTests/IntegrationTests/RepoWebsocketIntegrationTests.swift new file mode 100644 index 00000000..bbff92cd --- /dev/null +++ b/Packages/automerge-repo/Tests/AutomergeRepoTests/IntegrationTests/RepoWebsocketIntegrationTests.swift @@ -0,0 +1,105 @@ +import Automerge +import AutomergeRepo +import AutomergeUtilities +import OSLog +import XCTest + +// NOTE(heckj): This integration test expects that you have a websocket server with the +// Automerge-repo sync protocol 
running at localhost:3030. If you're testing from the local +// repository, run the `./scripts/interop.sh` script to start up a local instance to +// respond. + +final class RepoWebsocketIntegrationTests: XCTestCase { + private static let subsystem = Bundle.main.bundleIdentifier! + + static let test = Logger(subsystem: subsystem, category: "WebSocketSyncIntegrationTests") + let syncDestination = "ws://localhost:3030/" + // Switch to the following line to run a test against the public hosted automerge-repo instance +// let syncDestination = "wss://sync.automerge.org/" + + override func setUp() async throws { + let isWebSocketConnectable = await webSocketAvailable(destination: syncDestination) + try XCTSkipUnless(isWebSocketConnectable, "websocket unavailable for integration test") + } + + override func tearDown() async throws { + // teardown + } + + func webSocketAvailable(destination: String) async -> Bool { + guard let url = URL(string: destination) else { + Self.test.error("invalid URL: \(destination, privacy: .public) - endpoint unavailable") + return false + } + // establishes the websocket + let request = URLRequest(url: url) + let ws: URLSessionWebSocketTask = URLSession.shared.webSocketTask(with: request) + ws.resume() + Self.test.info("websocket to \(destination, privacy: .public) prepped, sending ping") + do { + try await ws.sendPing() + Self.test.info("PING OK - returning true") + ws.cancel(with: .normalClosure, reason: nil) + return true + } catch { + Self.test.error("PING FAILED: \(error.localizedDescription, privacy: .public) - returning false") + ws.cancel(with: .abnormalClosure, reason: nil) + return false + } + } + + func testSync() async throws { + // document structure for test + struct ExampleStruct: Identifiable, Codable, Hashable { + let id: UUID + var title: String + var discussion: AutomergeText + + init(title: String, discussion: String) { + self.id = UUID() + self.title = title + self.discussion = AutomergeText(discussion) + } + } + + // 
set up repo (with a client-websocket) + let repo = Repo(sharePolicy: SharePolicies.agreeable) + let websocket = WebSocketProvider() + await repo.addNetworkAdapter(adapter: websocket) + + // add the document to the repo + let handle: DocHandle = try await repo.create(doc: Document(), id: DocumentId()) + + // initial setup and encoding of Automerge doc to sync it + let encoder = AutomergeEncoder(doc: handle.doc) + let model = ExampleStruct(title: "new item", discussion: "editable text") + try encoder.encode(model) + + let url = try XCTUnwrap(URL(string: syncDestination)) + try await websocket.connect(to: url) + + // With the websocket protocol, we don't get confirmation of a sync being complete - + // if the other side has everything and nothing new, they just won't send a response + // back. In that case, we don't get any further responses - but we don't _know_ that + // it's complete. In an initial sync there will always be at least one response, but + // we can't quite count on this always being an initial sync... so I'm shimming in a + // short "wait" here to leave the background tasks that receive WebSocket messages + // running to catch any updates, and hoping that'll be enough time to complete it. 
+ try await Task.sleep(for: .seconds(5)) + await websocket.disconnect() + + // Create a second, empty repo that doesn't have the document and request it + + // set up repo (with a client-websocket) + let repoTwo = Repo(sharePolicy: SharePolicies.agreeable) + let websocketTwo = WebSocketProvider() + await repoTwo.addNetworkAdapter(adapter: websocketTwo) + + // connect the repo to the external automerge-repo + try await websocketTwo.connect(to: url) + + let foundDocHandle = try await repoTwo.find(id: handle.id) + XCTAssertEqual(foundDocHandle.id, handle.id) + XCTAssertTrue(RepoHelpers.equalContents(doc1: foundDocHandle.doc, doc2: handle.doc)) + } +} diff --git a/MeetingNotesTests/URLSessionWebSocketTask+sendPing.swift b/Packages/automerge-repo/Tests/AutomergeRepoTests/IntegrationTests/URLSessionWebSocketTask+sendPing.swift similarity index 100% rename from MeetingNotesTests/URLSessionWebSocketTask+sendPing.swift rename to Packages/automerge-repo/Tests/AutomergeRepoTests/IntegrationTests/URLSessionWebSocketTask+sendPing.swift diff --git a/MeetingNotesTests/WebSocketSyncIntegrationTests.swift b/Packages/automerge-repo/Tests/AutomergeRepoTests/IntegrationTests/WebSocketSyncIntegrationTests.swift similarity index 96% rename from MeetingNotesTests/WebSocketSyncIntegrationTests.swift rename to Packages/automerge-repo/Tests/AutomergeRepoTests/IntegrationTests/WebSocketSyncIntegrationTests.swift index 7acb5ae6..ef76137b 100644 --- a/MeetingNotesTests/WebSocketSyncIntegrationTests.swift +++ b/Packages/automerge-repo/Tests/AutomergeRepoTests/IntegrationTests/WebSocketSyncIntegrationTests.swift @@ -6,6 +6,7 @@ // import Automerge +import AutomergeRepo import AutomergeUtilities import OSLog import XCTest @@ -76,8 +77,8 @@ final class WebSocketSyncIntegrationTests: XCTestCase { // establish and sync the document // SwiftUI does it in a two-step: define and then add data through onAppear: - let websocket = WebsocketSyncConnection(nil, id: nil) - 
websocket.registerDocument(document, id: documentId) + let websocket = await WebsocketSyncConnection(nil, id: nil) + await websocket.registerDocument(document, id: documentId) print("SYNCING DOCUMENT: \(documentId.description)") try await websocket.connect(syncDestination) diff --git a/Packages/automerge-repo/Tests/AutomergeRepoTests/RepoHelpers.swift b/Packages/automerge-repo/Tests/AutomergeRepoTests/RepoHelpers.swift new file mode 100644 index 00000000..3d929e4c --- /dev/null +++ b/Packages/automerge-repo/Tests/AutomergeRepoTests/RepoHelpers.swift @@ -0,0 +1,30 @@ +import Automerge +@testable import AutomergeRepo +import AutomergeUtilities +import Foundation + +public enum RepoHelpers { + static func documentWithData() throws -> Document { + let newDoc = Document() + let txt = try newDoc.putObject(obj: .ROOT, key: "words", ty: .Text) + try newDoc.updateText(obj: txt, value: "Hello World!") + return newDoc + } + + static func docHandleWithData() throws -> DocHandle { + let newDoc = Document() + let txt = try newDoc.putObject(obj: .ROOT, key: "words", ty: .Text) + try newDoc.updateText(obj: txt, value: "Hello World!") + return DocHandle(id: DocumentId(), doc: newDoc) + } + + static func equalContents(doc1: Document, doc2: Document) -> Bool { + do { + let doc1Contents = try doc1.parseToSchema(doc1, from: .ROOT) + let doc2Contents = try doc2.parseToSchema(doc1, from: .ROOT) + return doc1Contents == doc2Contents + } catch { + return false + } + } +} diff --git a/Packages/automerge-repo/Tests/AutomergeRepoTests/SharePolicyTests.swift b/Packages/automerge-repo/Tests/AutomergeRepoTests/SharePolicyTests.swift new file mode 100644 index 00000000..227ec8de --- /dev/null +++ b/Packages/automerge-repo/Tests/AutomergeRepoTests/SharePolicyTests.swift @@ -0,0 +1,12 @@ +@testable import AutomergeRepo +import XCTest + +final class SharePolicyTests: XCTestCase { + func testSharePolicy() async throws { + let agreeableShareResult = await SharePolicies.agreeable.share(peer: "A", 
docId: DocumentId()) + XCTAssertTrue(agreeableShareResult) + + let readOnlyShareResult = await SharePolicies.readonly.share(peer: "A", docId: DocumentId()) + XCTAssertFalse(readOnlyShareResult) + } +} diff --git a/Packages/automerge-repo/Tests/AutomergeRepoTests/StorageSubsystemTests.swift b/Packages/automerge-repo/Tests/AutomergeRepoTests/StorageSubsystemTests.swift new file mode 100644 index 00000000..e84cb622 --- /dev/null +++ b/Packages/automerge-repo/Tests/AutomergeRepoTests/StorageSubsystemTests.swift @@ -0,0 +1,121 @@ +import Automerge +@testable import AutomergeRepo +import AutomergeUtilities +import XCTest + +final class StorageSubsystemTests: XCTestCase { + var subsystem: DocumentStorage! + var testStorageProvider: InMemoryStorage! + + override func setUp() async throws { + let storageProvider = await InMemoryStorage() + let incrementalKeys = await storageProvider.incrementalKeys() + let docIds = await storageProvider.storageKeys() + XCTAssertEqual(docIds.count, 0) + XCTAssertEqual(incrementalKeys.count, 0) + + subsystem = DocumentStorage(storageProvider) + testStorageProvider = storageProvider + } + + func assertCounts(docIds: Int, incrementals: Int) async { + let countOfIncrementalKeys = await testStorageProvider?.incrementalKeys().count + let countOfDocumentIdKeys = await testStorageProvider?.storageKeys().count + XCTAssertEqual(countOfDocumentIdKeys, docIds) + XCTAssertEqual(countOfIncrementalKeys, incrementals) + } + + func docDataSize(id: DocumentId) async -> Int { + await testStorageProvider?.load(id: id)?.count ?? 
0 + } + + func combinedIncData(id: DocumentId) async -> Int { + if let inc = await testStorageProvider?.loadRange(id: id, prefix: subsystem.chunkNamespace) { + return inc.reduce(0) { partialResult, data in + partialResult + data.count + } + } + return 0 + } + + func testSubsystemSetup() async throws { + XCTAssertNotNil(subsystem) + let newDoc = Document() + let newDocId = DocumentId() + + try await subsystem.saveDoc(id: newDocId, doc: newDoc) + await assertCounts(docIds: 0, incrementals: 1) + + let combinedKeys = await testStorageProvider?.incrementalKeys() + XCTAssertEqual(combinedKeys?.count, 1) + XCTAssertEqual(combinedKeys?[0].id, newDocId) + XCTAssertEqual(combinedKeys?[0].prefix, "incrChanges") + let incData: [Data]? = await testStorageProvider?.loadRange(id: newDocId, prefix: "incrChanges") + let incDataUnwrapped = try XCTUnwrap(incData) + XCTAssertEqual(incDataUnwrapped.count, 1) + XCTAssertEqual(incDataUnwrapped[0].count, 0) + + let txt = try newDoc.putObject(obj: .ROOT, key: "words", ty: .Text) + try await subsystem.saveDoc(id: newDocId, doc: newDoc) + + await assertCounts(docIds: 0, incrementals: 1) + var incSize = await combinedIncData(id: newDocId) + XCTAssertEqual(incSize, 58) + + try newDoc.updateText(obj: txt, value: "Hello World!") + try await subsystem.saveDoc(id: newDocId, doc: newDoc) + + await assertCounts(docIds: 1, incrementals: 1) + incSize = await combinedIncData(id: newDocId) + var docSize = await docDataSize(id: newDocId) + XCTAssertEqual(docSize, 176) + XCTAssertEqual(incSize, 0) + + try await subsystem.compact(id: newDocId, doc: newDoc) + + await assertCounts(docIds: 1, incrementals: 1) + incSize = await combinedIncData(id: newDocId) + docSize = await docDataSize(id: newDocId) + XCTAssertEqual(docSize, 176) + XCTAssertEqual(incSize, 0) +// if let incrementals = await testStorageProvider?.loadRange(id: newDocId, prefix: subsystem.chunkNamespace) { +// print(incrementals) +// } + } + + func testSubsystemLoadDoc() async throws { + let 
newDoc = try RepoHelpers.documentWithData() + let newDocId = DocumentId() + try await subsystem.saveDoc(id: newDocId, doc: newDoc) + + let loadedDoc = try await subsystem.loadDoc(id: newDocId) + + XCTAssertTrue(RepoHelpers.equalContents(doc1: newDoc, doc2: loadedDoc)) + } + + func testSubsystemPurgeDoc() async throws { + let newDoc = try RepoHelpers.documentWithData() + let newDocId = DocumentId() + try await subsystem.saveDoc(id: newDocId, doc: newDoc) + + await assertCounts(docIds: 0, incrementals: 1) + let incSize = await combinedIncData(id: newDocId) + let docSize = await docDataSize(id: newDocId) + XCTAssertEqual(docSize, 0) + XCTAssertEqual(incSize, 106) + + try await subsystem.compact(id: newDocId, doc: newDoc) + await assertCounts(docIds: 1, incrementals: 1) + let compactedIncSize = await combinedIncData(id: newDocId) + let compactedDocSize = await docDataSize(id: newDocId) + XCTAssertEqual(compactedDocSize, 170) + XCTAssertEqual(compactedIncSize, 0) + + try await subsystem.purgeDoc(id: newDocId) + await assertCounts(docIds: 0, incrementals: 1) + let purgedIncSize = await combinedIncData(id: newDocId) + let purgedDocSize = await docDataSize(id: newDocId) + XCTAssertEqual(purgedDocSize, 0) + XCTAssertEqual(purgedIncSize, 0) + } +} diff --git a/Packages/automerge-repo/Tests/AutomergeRepoTests/TestNetworkProviders/InMemoryNetwork.swift b/Packages/automerge-repo/Tests/AutomergeRepoTests/TestNetworkProviders/InMemoryNetwork.swift new file mode 100644 index 00000000..360f40cd --- /dev/null +++ b/Packages/automerge-repo/Tests/AutomergeRepoTests/TestNetworkProviders/InMemoryNetwork.swift @@ -0,0 +1,488 @@ +import Automerge +import AutomergeRepo +import Foundation +import OSLog +import Tracing + +extension Logger { + static let testNetwork = Logger(subsystem: "InMemoryNetwork", category: "testNetwork") +} + +enum InMemoryNetworkErrors: Sendable { + public struct NoSuchEndpoint: Sendable, LocalizedError { + let name: String + public var errorDescription: String { + 
"Endpoint \(name) doesn't exist." + } + } + + public struct EndpointNotListening: Sendable, LocalizedError { + let name: String + public var errorDescription: String { + "Endpoint \(name) isn't listening for connections." + } + } +} + +// Tracing experimentation +struct InMemoryNetworkMsgBaggageManipulator: Injector, Extractor { + func inject(_ value: String, forKey key: String, into request: inout InMemoryNetworkMsg) { + request.appendHeader(key: key, value: value) + } + + func extract(key: String, from carrier: InMemoryNetworkMsg) -> String? { + if let valueForKey = carrier.headers[key] { + return valueForKey + } + return nil + } +} + +/// Emulates a protocol that supports headers or other embedded details +public struct InMemoryNetworkMsg: Sendable, CustomDebugStringConvertible { + var headers: [String: String] = [:] + var payload: SyncV1Msg + + public var debugDescription: String { + var str = "" + for (k, v) in headers { + str.append("[\(k):\(v)]") + } + str.append(" - \(payload.debugDescription)") + return str + } + + init(headers: [String: String] = [:], _ payload: SyncV1Msg) { + self.headers = headers + self.payload = payload + } + + mutating func appendHeader(key: String, value: String) { + headers[key] = value + } +} + +@InMemoryNetwork +public final class InMemoryNetworkConnection { + public var description: String { + get async { + let i = initiatingEndpoint.endpointName ?? "?" + let j = receivingEndpoint.endpointName ?? "?" + return "\(id.uuidString) [\(i)(\(initiatingEndpoint.peerId ?? "unconfigured"))] --> [\(j)(\(receivingEndpoint.peerId ?? "unconfigured"))])" + } + } + + let id: UUID + let initiatingEndpoint: InMemoryNetworkEndpoint + let receivingEndpoint: InMemoryNetworkEndpoint + let transferLatency: Duration? 
+ let trace: Bool + + init(from: InMemoryNetworkEndpoint, to: InMemoryNetworkEndpoint, latency: Duration?, trace: Bool) { + self.id = UUID() + self.initiatingEndpoint = from + self.receivingEndpoint = to + self.transferLatency = latency + self.trace = trace + } + + func close() async { + await self.initiatingEndpoint.connectionTerminated(self.id) + await self.receivingEndpoint.connectionTerminated(self.id) + } + + func send(sender: String, msg: InMemoryNetworkMsg) async { + do { + if initiatingEndpoint.endpointName == sender { + if let latency = transferLatency { + try await Task.sleep(for: latency) + if trace { + Logger.testNetwork + .trace( + "XMIT[\(self.id.bs58String)] \(msg.debugDescription) from \(sender) with delay \(latency)" + ) + } + } else { + if trace { + Logger.testNetwork.trace("XMIT[\(self.id.bs58String)] \(msg.debugDescription) from \(sender)") + } + } + await receivingEndpoint.receiveMessage(msg: msg.payload) + } else if receivingEndpoint.endpointName == sender { + if let latency = transferLatency { + try await Task.sleep(for: latency) + if trace { + Logger.testNetwork + .trace( + "XMIT[\(self.id.bs58String)] \(msg.debugDescription) from \(sender) with delay \(latency)" + ) + } + } else { + if trace { + Logger.testNetwork.trace("XMIT[\(self.id.bs58String)] \(msg.debugDescription) from \(sender)") + } + } + await initiatingEndpoint.receiveMessage(msg: msg.payload) + } + } catch { + Logger.testNetwork.error("Failure during latency sleep: \(error.localizedDescription)") + } + } +} + +@InMemoryNetwork // isolate all calls to this class using the InMemoryNetwork global actor +public final class InMemoryNetworkEndpoint: NetworkProvider { + public typealias ProviderConfiguration = BasicNetworkConfiguration + public typealias NetworkConnectionEndpoint = String + + public struct BasicNetworkConfiguration: Sendable { + let listeningNetwork: Bool + let name: String + } + + public init(_ config: BasicNetworkConfiguration) async { + self.peeredConnections = [] 
+ self._connections = [] + self.listening = false + + self.delegate = nil + self.peerId = nil + self.peerMetadata = nil + + // testing spies + self.received_messages = [] + self.sent_messages = [] + // logging control + self.logReceivedMessages = false + self.config = config + if config.listeningNetwork { + self.listening = true + } + } + + public func configure(_ config: BasicNetworkConfiguration) async { + self.config = config + if config.listeningNetwork { + self.listening = true + } + } + + public var debugDescription: String { + if let peerId = self.peerId { + "In-Memory Network: \(peerId)" + } else { + "Unconfigured In-Memory Network" + } + } + + public var peeredConnections: [PeerConnection] + var _connections: [InMemoryNetworkConnection] + var delegate: (any NetworkEventReceiver)? + var config: BasicNetworkConfiguration + var listening: Bool + var logReceivedMessages: Bool + + public var peerId: PEER_ID? + var peerMetadata: PeerMetadata? + + var received_messages: [SyncV1Msg] + var sent_messages: [SyncV1Msg] + + func wipe() { + self.peeredConnections = [] + self._connections = [] + self.received_messages = [] + self.sent_messages = [] + } + + public func logReceivedMessages(_ enableLogging: Bool) { + self.logReceivedMessages = enableLogging + } + + public var endpointName: String? 
{ + self.config.name + } + + public func acceptNewConnection(_ connection: InMemoryNetworkConnection) async { + withSpan("accept-new-connection") { _ in + if listening { + self._connections.append(connection) + } else { + fatalError("Can't accept connection on a non-listening interface") + } + } + } + + public func connectionTerminated(_ id: UUID) async { + withSpan("connection-terminated") { _ in + self._connections.removeAll { connection in + connection.id == id + } + } + } + + public func connect(to: String) async throws { + guard let name = self.endpointName, + let peerId = self.peerId, + let peerMetadata = self.peerMetadata + else { + fatalError("Can't connect an unconfigured network") + } + // aka "activate" + try await withSpan("connect") { span in + + let connection = try await InMemoryNetwork.shared.connect(from: name, to: to, latency: nil) + + self._connections.append(connection) + + let attributes: [String: SpanAttribute] = [ + "type": SpanAttribute(stringLiteral: "join"), + "peerId": SpanAttribute(stringLiteral: peerId), + ] + + span.addEvent(SpanEvent(name: "message send", attributes: SpanAttributes(attributes))) + + await connection.send( + sender: name, + + msg: InMemoryNetworkMsg( + .join(.init(senderId: peerId, metadata: peerMetadata)) + ) + ) + } + } + + public func disconnect() async { + await withSpan("disconnect") { _ in + for connection in _connections { + await connection.close() + } + _connections = [] + peeredConnections = [] + } + } + + func receiveWrappedMessage(msg: InMemoryNetworkMsg) async { + await withSpan("receiveWrappedMessage") { _ in + if var context = ServiceContext.current { + InstrumentationSystem.instrument.extract( + msg, + into: &context, + using: InMemoryNetworkMsgBaggageManipulator() + ) + } + await self.receiveMessage(msg: msg.payload) + } + } + + func receiveMessage(msg: SyncV1Msg) async { + await withSpan("receiveWrappedMessage") { span in + guard let peerId = self.peerId else { + fatalError("Attempting to receive 
message with unconfigured network adapter") + } + if logReceivedMessages { + Logger.testNetwork.trace("\(peerId) RECEIVED MSG: \(msg.debugDescription)") + } + received_messages.append(msg) + switch msg { + case let .leave(msg): + span.addEvent(SpanEvent(name: "leave msg received")) + await self.delegate?.receiveEvent(event: .close) + _connections.removeAll { connection in + connection.initiatingEndpoint.peerId == msg.senderId || + connection.receivingEndpoint.peerId == msg.senderId + } + peeredConnections.removeAll { peerConnection in + peerConnection.peerId == msg.senderId + } + case let .join(msg): + if listening { + span.addEvent(SpanEvent(name: "join msg received")) + await self.delegate?.receiveEvent( + event: .peerCandidate( + payload: .init( + peerId: msg.senderId, + peerMetadata: msg.peerMetadata + ) + ) + ) + peeredConnections.append(PeerConnection(peerId: msg.senderId, peerMetadata: msg.peerMetadata)) + span.addEvent(SpanEvent(name: "replying with peer msg")) + await self.send( + message: .peer( + .init( + senderId: peerId, + targetId: msg.senderId, + storageId: self.peerMetadata?.storageId, + ephemeral: self.peerMetadata?.isEphemeral ?? true + ) + ), + to: msg.senderId + ) + } else { + fatalError("non-listening endpoint received a join message") + } + case let .peer(msg): + span.addEvent(SpanEvent(name: "peer msg received")) + peeredConnections.append(PeerConnection(peerId: msg.senderId, peerMetadata: msg.peerMetadata)) + await self.delegate?.receiveEvent( + event: .ready( + payload: .init( + peerId: msg.senderId, + peerMetadata: msg.peerMetadata + ) + ) + ) + default: + if self.delegate == nil, logReceivedMessages { + Logger.testNetwork + .warning("ADAPTER \(self.debugDescription) has no delegate, ignoring received message") + } + span.addEvent(SpanEvent(name: "forwarding received msg to delegate")) + await self.delegate?.receiveEvent(event: .message(payload: msg)) + } + } + } + + public func send(message: SyncV1Msg, to: PEER_ID?) 
async { + guard let endpointName = self.endpointName else { + fatalError("Can't send without a configured endpoint") + } + await withSpan("send message") { span in + sent_messages.append(message) + + var wrappedMsg = InMemoryNetworkMsg(message) + if let context = ServiceContext.current { + InstrumentationSystem.instrument.inject( + context, + into: &wrappedMsg, + using: InMemoryNetworkMsgBaggageManipulator() + ) + } + + if let peerTarget = to { + let connectionsWithPeer = _connections.filter { connection in + connection.initiatingEndpoint.peerId == peerTarget || + connection.receivingEndpoint.peerId == peerTarget + } + for connection in connectionsWithPeer { + span.addEvent( + SpanEvent(name: "send message to peer", attributes: SpanAttributes([ + "msg": SpanAttribute(stringLiteral: wrappedMsg.debugDescription), + "destination": SpanAttribute(stringLiteral: peerTarget), + ])) + ) + await connection.send(sender: endpointName, msg: wrappedMsg) + } + } else { + // broadcast + for connection in _connections { + await connection.send(sender: endpointName, msg: wrappedMsg) + } + } + } + } + + public func setDelegate( + _ delegate: any NetworkEventReceiver, + as peer: PEER_ID, + with metadata: AutomergeRepo.PeerMetadata? + ) async { + self.peerId = peer + self.peerMetadata = metadata + self.delegate = delegate + } +} + +/// A Test network that operates in memory +/// +/// Acts akin to an outbound connection - doesn't "connect" and trigger messages until you explicitly ask +@globalActor public actor InMemoryNetwork { + public static let shared = InMemoryNetwork() + + private init() {} + + var endpoints: [String: InMemoryNetworkEndpoint] = [:] + var simulatedConnections: [InMemoryNetworkConnection] = [] + var enableTracing: Bool = false + + public func traceConnections(_ enableTracing: Bool) { + self.enableTracing = enableTracing + } + + public func networkEndpoint(named: String) -> InMemoryNetworkEndpoint? 
{ + let x = endpoints[named] + return x + } + + public func connections() -> [InMemoryNetworkConnection] { + simulatedConnections + } + + // MARK: TESTING SPECIFIC API + + public func createNetworkEndpoint( + config: InMemoryNetworkEndpoint.BasicNetworkConfiguration + ) async -> InMemoryNetworkEndpoint { + let x = await InMemoryNetworkEndpoint(config) + endpoints[config.name] = x + return x + } + + public func connect(from: String, to: String, latency: Duration?) async throws -> InMemoryNetworkConnection { + if let initiator = networkEndpoint(named: from), let destination = networkEndpoint(named: to) { + guard await destination.listening == true else { + throw InMemoryNetworkErrors.EndpointNotListening(name: to) + } + + let newConnection = await InMemoryNetworkConnection( + from: initiator, + to: destination, + latency: latency, + trace: self.enableTracing + ) + simulatedConnections.append(newConnection) + await destination.acceptNewConnection(newConnection) + return newConnection + } else { + throw InMemoryNetworkErrors.NoSuchEndpoint(name: to) + } + } + + public func terminateConnection(_ id: UUID) async { + if let connectionIndex = simulatedConnections.firstIndex(where: { $0.id == id }) { + let connection = simulatedConnections[connectionIndex] + await connection.close() + simulatedConnections.remove(at: connectionIndex) + } + } + + public func messagesReceivedBy(name: String) async -> [SyncV1Msg] { + if let msgs = await self.endpoints[name]?.received_messages { + msgs + } else { + [] + } + } + + public func messagesSentBy(name: String) async -> [SyncV1Msg] { + if let msgs = await self.endpoints[name]?.sent_messages { + msgs + } else { + [] + } + } + + /// WIPES TEST NETWORK and resets all connections, but leaves endpoints intact and configured + public func resetTestNetwork() async { + for endpoint in self.endpoints.values { + await endpoint.wipe() + } + endpoints.removeAll() + + for connection in simulatedConnections { + await connection.close() + } + 
simulatedConnections = [] + } +} diff --git a/Packages/automerge-repo/Tests/AutomergeRepoTests/TestNetworkProviders/TestOutgoingNetworkProvider.swift b/Packages/automerge-repo/Tests/AutomergeRepoTests/TestNetworkProviders/TestOutgoingNetworkProvider.swift new file mode 100644 index 00000000..b8e31f40 --- /dev/null +++ b/Packages/automerge-repo/Tests/AutomergeRepoTests/TestNetworkProviders/TestOutgoingNetworkProvider.swift @@ -0,0 +1,203 @@ +import Automerge +import AutomergeRepo +import Foundation + +public struct TestOutgoingNetworkConfiguration: Sendable, CustomDebugStringConvertible { + let remotePeer: PEER_ID + let remotePeerMetadata: PeerMetadata? + let msgResponse: @Sendable (SyncV1Msg) async -> SyncV1Msg? + + public var debugDescription: String { + "peer: \(remotePeer), metadata: \(remotePeerMetadata?.debugDescription ?? "none")" + } + + init( + remotePeer: PEER_ID, + remotePeerMetadata: PeerMetadata?, + msgResponse: @Sendable @escaping (SyncV1Msg) async -> SyncV1Msg + ) { + self.remotePeer = remotePeer + self.remotePeerMetadata = remotePeerMetadata + self.msgResponse = msgResponse + } + + public static let simple: @Sendable (SyncV1Msg) async -> SyncV1Msg? = { msg in + var doc = Document() + var syncState = SyncState() + let peerId: PEER_ID = "SIMPLE REMOTE TEST" + let peerMetadata: PeerMetadata? = PeerMetadata(storageId: "SIMPLE STORAGE", isEphemeral: true) + switch msg { + case let .join(msg): + return .peer(.init( + senderId: peerId, + targetId: msg.senderId, + storageId: peerMetadata?.storageId, + ephemeral: peerMetadata?.isEphemeral ?? 
false + )) + case .peer: + return nil + case .leave: + return nil + case .error: + return nil + case let .request(msg): + // everything is always unavailable + return .unavailable(.init(documentId: msg.documentId, senderId: peerId, targetId: msg.senderId)) + case let .sync(msg): + do { + try doc.receiveSyncMessage(state: syncState, message: msg.data) + if let returnData = doc.generateSyncMessage(state: syncState) { + return .sync(.init( + documentId: msg.documentId, + senderId: peerId, + targetId: msg.senderId, + sync_message: returnData + )) + } + } catch { + return .error(.init(message: error.localizedDescription)) + } + return nil + case .unavailable: + return nil + case .ephemeral: + return nil // TODO: RESPONSE EXAMPLE + case .remoteSubscriptionChange: + return nil + case .remoteHeadsChanged: + return nil + case .unknown: + return nil + } + } +} + +/// A Test network that operates in memory +/// +/// Acts akin to an outbound connection - doesn't "connect" and trigger messages until you explicitly ask +public actor TestOutgoingNetworkProvider: NetworkProvider { + public var peeredConnections: [PeerConnection] = [] + + public typealias NetworkConnectionEndpoint = String + + public nonisolated var debugDescription: String { + "TestOutgoingNetworkProvider" + } + + public nonisolated var description: String { + "TestNetwork" + } + + var delegate: (any NetworkEventReceiver)? + + var config: TestOutgoingNetworkConfiguration? + var connected: Bool + var messages: [SyncV1Msg] = [] + + public typealias ProviderConfiguration = TestOutgoingNetworkConfiguration + + init() { + self.connected = false + self.delegate = nil + } + + public func configure(_ config: TestOutgoingNetworkConfiguration) async { + self.config = config + } + + public var connectedPeer: PEER_ID? 
{ + get async { + if let config = self.config, self.connected == true { + return config.remotePeer + } + return nil + } + } + + public func connect(to _: String) async throws { + do { + guard let config = self.config else { + throw UnconfiguredTestNetwork() + } + self.peeredConnections.append(PeerConnection( + peerId: config.remotePeer, + peerMetadata: config.remotePeerMetadata + )) + await self.delegate?.receiveEvent( + event: .peerCandidate( + payload: .init( + peerId: config.remotePeer, + peerMetadata: config.remotePeerMetadata + ) + ) + ) + try await Task.sleep(for: .milliseconds(250)) + await self.delegate?.receiveEvent( + event: .ready( + payload: .init( + peerId: config.remotePeer, + peerMetadata: config.remotePeerMetadata + ) + ) + ) + self.connected = true + + } catch { + self.connected = false + } + } + + public func disconnect() async { + self.connected = false + } + + public func ready() async -> Bool { + self.connected + } + + public func send(message: SyncV1Msg, to _: PEER_ID?) async { + self.messages.append(message) + if let response = await config?.msgResponse(message) { + await delegate?.receiveEvent(event: .message(payload: response)) + } + } + + public func receiveMessage(msg _: SyncV1Msg) async { + // no-op on the receive, as all "responses" are generated by a closure provided + // by the configuration of this test network provider. + } + + public func setDelegate( + _ delegate: any AutomergeRepo.NetworkEventReceiver, + as _: AutomergeRepo.PEER_ID, + with _: AutomergeRepo.PeerMetadata? 
+ ) async { + self.delegate = delegate + } + + // MARK: TESTING SPECIFIC API + + public func disconnectNow() async { + guard let config = self.config else { + fatalError("Attempting to disconnect an unconfigured testing network") + } + if self.connected { + self.connected = false + await delegate?.receiveEvent(event: .peerDisconnect(payload: .init(peerId: config.remotePeer))) + } + } + + public func messagesReceivedByRemotePeer() async -> [SyncV1Msg] { + self.messages + } + + /// WIPES TEST NETWORK AND ERASES DELEGATE SETTING + public func resetTestNetwork() async { + guard self.config != nil else { + fatalError("Attempting to reset an unconfigured testing network") + } + self.connected = false + self.messages = [] + self.delegate = nil + } +} diff --git a/Packages/automerge-repo/Tests/AutomergeRepoTests/TestNetworkProviders/UnconfiguredTestNetwork.swift b/Packages/automerge-repo/Tests/AutomergeRepoTests/TestNetworkProviders/UnconfiguredTestNetwork.swift new file mode 100644 index 00000000..6d76f716 --- /dev/null +++ b/Packages/automerge-repo/Tests/AutomergeRepoTests/TestNetworkProviders/UnconfiguredTestNetwork.swift @@ -0,0 +1,7 @@ +import Foundation + +struct UnconfiguredTestNetwork: LocalizedError { + public var errorDescription: String? { + "The test network is not configured." + } +} diff --git a/Packages/automerge-repo/Tests/AutomergeRepoTests/TestStorageProviders/InMemoryStorage.swift b/Packages/automerge-repo/Tests/AutomergeRepoTests/TestStorageProviders/InMemoryStorage.swift new file mode 100644 index 00000000..29cf9f74 --- /dev/null +++ b/Packages/automerge-repo/Tests/AutomergeRepoTests/TestStorageProviders/InMemoryStorage.swift @@ -0,0 +1,74 @@ +import AutomergeRepo +import struct Foundation.Data +import struct Foundation.UUID + +@globalActor public actor TestActor { + public static var shared = TestActor() +} + +/// An in-memory only storage provider. 
+@TestActor +public final class InMemoryStorage: StorageProvider { + public nonisolated let id: STORAGE_ID = UUID().uuidString + + var _storage: [DocumentId: Data] = [:] + var _incrementalChunks: [CombinedKey: [Data]] = [:] + + public init() {} + + public struct CombinedKey: Hashable, Comparable { + public static func < (lhs: InMemoryStorage.CombinedKey, rhs: InMemoryStorage.CombinedKey) -> Bool { + if lhs.prefix == rhs.prefix { + return lhs.id < rhs.id + } + return lhs.prefix < rhs.prefix + } + + public let id: DocumentId + public let prefix: String + } + + public func load(id: DocumentId) async -> Data? { + _storage[id] + } + + public func save(id: DocumentId, data: Data) async { + _storage[id] = data + } + + public func remove(id: DocumentId) async { + _storage.removeValue(forKey: id) + } + + // MARK: Incremental Load Support + + public func addToRange(id: DocumentId, prefix: String, data: Data) async { + var dataArray: [Data] = _incrementalChunks[CombinedKey(id: id, prefix: prefix)] ?? [] + dataArray.append(data) + _incrementalChunks[CombinedKey(id: id, prefix: prefix)] = dataArray + } + + public func loadRange(id: DocumentId, prefix: String) async -> [Data] { + _incrementalChunks[CombinedKey(id: id, prefix: prefix)] ?? [] + } + + public func removeRange(id: DocumentId, prefix: String, data: [Data]) async { + var chunksForKey: [Data] = _incrementalChunks[CombinedKey(id: id, prefix: prefix)] ?? 
[] + for d in data { + if let indexToRemove = chunksForKey.firstIndex(of: d) { + chunksForKey.remove(at: indexToRemove) + } + } + _incrementalChunks[CombinedKey(id: id, prefix: prefix)] = chunksForKey + } + + // MARK: Testing Spies/Support + + public func storageKeys() -> [DocumentId] { + _storage.keys.sorted() + } + + public func incrementalKeys() -> [CombinedKey] { + _incrementalChunks.keys.sorted() + } +} diff --git a/Packages/automerge-repo/Tests/AutomergeRepoTests/TwoReposWithNetworkTests.swift b/Packages/automerge-repo/Tests/AutomergeRepoTests/TwoReposWithNetworkTests.swift new file mode 100644 index 00000000..2316cade --- /dev/null +++ b/Packages/automerge-repo/Tests/AutomergeRepoTests/TwoReposWithNetworkTests.swift @@ -0,0 +1,297 @@ +import Automerge +@testable import AutomergeRepo +import AutomergeUtilities +import DistributedTracer +import Foundation +import Logging +import OTel +import OTLPGRPC +import RegexBuilder +import ServiceLifecycle +import Tracing +import XCTest + +final class TwoReposWithNetworkTests: XCTestCase { + let network = InMemoryNetwork.shared + var repoOne: Repo! + var repoTwo: Repo! + + var adapterOne: InMemoryNetworkEndpoint! + var adapterTwo: InMemoryNetworkEndpoint! 
+ + override func setUp() async throws { + await TestTracer.shared.bootstrap(serviceName: "RepoTests") + await withSpan("setUp") { _ in + + await withSpan("resetTestNetwork") { _ in + await network.resetTestNetwork() + } + + await withSpan("TwoReposWithNetworkTests_setup") { _ in + + let endpoints = await network.endpoints + XCTAssertEqual(endpoints.count, 0) + + repoOne = Repo(sharePolicy: SharePolicies.readonly) + // Repo setup WITHOUT any storage subsystem + let storageId = await repoOne.storageId() + XCTAssertNil(storageId) + + adapterOne = await network.createNetworkEndpoint( + config: .init( + listeningNetwork: false, + name: "One" + ) + ) + await repoOne.addNetworkAdapter(adapter: adapterOne) + + let peersOne = await repoOne.peers() + XCTAssertEqual(peersOne, []) + + repoTwo = Repo(sharePolicy: SharePolicies.agreeable) + adapterTwo = await network.createNetworkEndpoint( + config: .init( + listeningNetwork: true, + name: "Two" + ) + ) + await repoTwo.addNetworkAdapter(adapter: adapterTwo) + + let peersTwo = await repoTwo.peers() + XCTAssertEqual(peersTwo, []) + + let connections = await network.connections() + XCTAssertEqual(connections.count, 0) + + let endpointRecount = await network.endpoints + XCTAssertEqual(endpointRecount.count, 2) + } + } + } + + override func tearDown() async throws { + if let tracer = await TestTracer.shared.tracer { + tracer.forceFlush() + // Testing does NOT have a polite shutdown waiting for a flush to complete, so + // we explicitly give it some extra time here to flush out any spans remaining. 
+ try await Task.sleep(for: .seconds(1)) + } + } + + func testMostBasicRepoStartingPoints() async throws { + // Repo + // property: peers [PeerId] - all (currently) connected peers + let peersOne = await repoOne.peers() + let peersTwo = await repoTwo.peers() + XCTAssertEqual(peersOne, []) + XCTAssertEqual(peersOne, peersTwo) + + let knownIdsOne = await repoOne.documentIds() + XCTAssertEqual(knownIdsOne, []) + + let knownIdsTwo = await repoOne.documentIds() + XCTAssertEqual(knownIdsTwo, knownIdsOne) + } + + func testCreateNetworkEndpoint() async throws { + let _ = await network.createNetworkEndpoint( + config: .init( + listeningNetwork: false, + name: "Z" + ) + ) + let endpoints = await network.endpoints + XCTAssertEqual(endpoints.count, 3) + let z = endpoints["Z"] + XCTAssertNotNil(z) + } + + func testConnect() async throws { + // Enable the following line to see the messages from the connections + // point of view: + + // await network.traceConnections(true) + + // Enable logging of received for the adapter: + await adapterOne.logReceivedMessages(true) + await adapterTwo.logReceivedMessages(true) + // Logging doesn't show up in exported test output - it's interleaved into Xcode's console + // which is useful for debugging tests + + try await withSpan("testConnect") { _ in + try await adapterOne.connect(to: "Two") + + let connectionIdFromOne = await adapterOne._connections.first?.id + let connectionIdFromTwo = await adapterTwo._connections.first?.id + XCTAssertEqual(connectionIdFromOne, connectionIdFromTwo) + + let peersOne = await adapterOne.peeredConnections + let peersTwo = await adapterTwo.peeredConnections + XCTAssertFalse(peersOne.isEmpty) + XCTAssertFalse(peersTwo.isEmpty) + } + } + + func testCreate() async throws { + try await withSpan("testCreate") { _ in + + // initial conditions + var knownOnTwo = await repoTwo.documentIds() + var knownOnOne = await repoOne.documentIds() + XCTAssertEqual(knownOnOne.count, 0) + XCTAssertEqual(knownOnTwo.count, 0) + + // 
Create and add some doc content to the "server" repo - RepoTwo + let newDocId = DocumentId() + let newDoc = try await withSpan("repoTwo.create") { _ in + try await repoTwo.create(id: newDocId) + } + // add some content to the new document + try newDoc.doc.put(obj: .ROOT, key: "title", value: .String("INITIAL VALUE")) + + XCTAssertNotNil(newDoc) + knownOnTwo = await repoTwo.documentIds() + XCTAssertEqual(knownOnTwo.count, 1) + XCTAssertEqual(knownOnTwo[0], newDocId) + + knownOnOne = await repoOne.documentIds() + XCTAssertEqual(knownOnOne.count, 0) + + // "GO ONLINE" + // await network.traceConnections(true) + // await adapterTwo.logReceivedMessages(true) + try await withSpan("adapterOne.connect") { _ in + try await adapterOne.connect(to: "Two") + } + + // verify that after sync, both repos have a copy of the document + knownOnOne = await repoOne.documentIds() + XCTAssertEqual(knownOnOne.count, 1) + XCTAssertEqual(knownOnOne[0], newDocId) + } + } + + func testFind() async throws { + // initial conditions + var knownOnTwo = await repoTwo.documentIds() + var knownOnOne = await repoOne.documentIds() + XCTAssertEqual(knownOnOne.count, 0) + XCTAssertEqual(knownOnTwo.count, 0) + + // "GO ONLINE" + // await network.traceConnections(true) + // await adapterTwo.logReceivedMessages(true) + try await withSpan("adapterOne.connect") { _ in + try await adapterOne.connect(to: "Two") + } + + // Create and add some doc content to the "server" repo - RepoTwo + let newDocId = DocumentId() + let newDoc = try await withSpan("repoTwo.create") { _ in + try await repoTwo.create(id: newDocId) + } + XCTAssertNotNil(newDoc.doc) + // add some content to the new document + try newDoc.doc.put(obj: .ROOT, key: "title", value: .String("INITIAL VALUE")) + + // Introducing a doc _after_ connecting shouldn't share it automatically + knownOnTwo = await repoTwo.documentIds() + XCTAssertEqual(knownOnTwo.count, 1) + XCTAssertEqual(knownOnTwo[0], newDocId) + + knownOnOne = await repoOne.documentIds() + 
XCTAssertEqual(knownOnOne.count, 0) + + // We can _request_ the document, and should find it + do { + let foundDoc = try await repoOne.find(id: newDocId) + XCTAssertTrue( + RepoHelpers.equalContents(doc1: foundDoc.doc, doc2: newDoc.doc) + ) + } catch { + let errMsg = error.localizedDescription + print(errMsg) + } + } + + func testFindFail() async throws { + // initial conditions + var knownOnTwo = await repoTwo.documentIds() + var knownOnOne = await repoOne.documentIds() + XCTAssertEqual(knownOnOne.count, 0) + XCTAssertEqual(knownOnTwo.count, 0) + + // Create and add some doc content to the "client" repo - RepoOne + let newDocId = DocumentId() + let newDoc = try await withSpan("repoTwo.create") { _ in + try await repoOne.create(id: newDocId) + } + XCTAssertNotNil(newDoc.doc) + // add some content to the new document + try newDoc.doc.put(obj: .ROOT, key: "title", value: .String("INITIAL VALUE")) + + knownOnTwo = await repoTwo.documentIds() + XCTAssertEqual(knownOnTwo.count, 0) + + knownOnOne = await repoOne.documentIds() + XCTAssertEqual(knownOnOne.count, 1) + XCTAssertEqual(knownOnOne[0], newDocId) + // "GO ONLINE" + await network.traceConnections(true) + // await adapterTwo.logReceivedMessages(true) + try await withSpan("adapterOne.connect") { _ in + try await adapterOne.connect(to: "Two") + } + + // Two doesn't automatically get the document because RepoOne + // isn't configured to "share" automatically on connect + // (it's not "agreeable") + knownOnTwo = await repoTwo.documentIds() + XCTAssertEqual(knownOnTwo.count, 0) + + knownOnOne = await repoOne.documentIds() + XCTAssertEqual(knownOnOne.count, 1) + + // We can _request_ the document, but should be denied + do { + let _ = try await repoTwo.find(id: newDocId) + XCTFail("RepoOne is private and should NOT share the document") + } catch { + let errMsg = error.localizedDescription + print(errMsg) + } + } +// +// func testDelete() async throws { +// let myId = DocumentId() +// let _ = try await repo.create(id: 
myId) +// var knownIds = await repo.documentIds() +// XCTAssertEqual(knownIds.count, 1) +// +// try await repo.delete(id: myId) +// knownIds = await repo.documentIds() +// XCTAssertEqual(knownIds.count, 0) +// +// do { +// let _ = try await repo.find(id: DocumentId()) +// XCTFail() +// } catch {} +// } +// +// func testClone() async throws { +// let myId = DocumentId() +// let handle = try await repo.create(id: myId) +// XCTAssertEqual(myId, handle.id) +// +// let clonedHandle = try await repo.clone(id: myId) +// XCTAssertNotEqual(handle.id, clonedHandle.id) +// XCTAssertNotEqual(handle.doc.actor, clonedHandle.doc.actor) +// +// let knownIds = await repo.documentIds() +// XCTAssertEqual(knownIds.count, 2) +// } + + // TBD: + // - func storageIdForPeer(peerId) -> StorageId + // - func subscribeToRemotes([StorageId]) +} diff --git a/Packages/automerge-repo/collector-config.yaml b/Packages/automerge-repo/collector-config.yaml new file mode 100644 index 00000000..df239f54 --- /dev/null +++ b/Packages/automerge-repo/collector-config.yaml @@ -0,0 +1,24 @@ +receivers: + otlp: + protocols: + grpc: + endpoint: otel-collector:4317 + +exporters: + logging: + verbosity: detailed + + otlp: + endpoint: jaeger:4317 + tls: + insecure: true + + zipkin: + endpoint: "http://zipkin:9411/api/v2/spans" + + +service: + pipelines: + traces: + receivers: otlp + exporters: [logging, otlp, zipkin] diff --git a/Packages/automerge-repo/docker-compose-jaeger.yml b/Packages/automerge-repo/docker-compose-jaeger.yml new file mode 100644 index 00000000..0ffc197a --- /dev/null +++ b/Packages/automerge-repo/docker-compose-jaeger.yml @@ -0,0 +1,10 @@ +version: '3' +services: + jaeger: + # Jaeger is one of many options we can choose from as our distributed tracing backend. + # It supports OTLP out of the box so it's very easy to get started. 
+ # https://www.jaegertracing.io + image: jaegertracing/all-in-one + ports: + - "4317:4317" # This is where the OTLPGRPCSpanExporter sends its spans + - "16686:16686" # This is Jaeger's Web UI, visualizing recorded traces diff --git a/Packages/automerge-repo/docker-compose-zipkin-jaeger.yml b/Packages/automerge-repo/docker-compose-zipkin-jaeger.yml new file mode 100644 index 00000000..b4522b41 --- /dev/null +++ b/Packages/automerge-repo/docker-compose-zipkin-jaeger.yml @@ -0,0 +1,26 @@ +version: '3' +services: + otel-collector: + image: otel/opentelemetry-collector-contrib:latest + command: ["--config=/etc/config.yaml"] + volumes: + - ./collector-config.yaml:/etc/config.yaml + ports: + - "4317:4317" + networks: [exporter] + depends_on: [zipkin, jaeger] + + zipkin: + image: openzipkin/zipkin:latest + ports: + - "9411:9411" + networks: [exporter] + + jaeger: + image: jaegertracing/all-in-one + ports: + - "16686:16686" + networks: [exporter] + +networks: + exporter: diff --git a/Packages/automerge-repo/notes.md b/Packages/automerge-repo/notes.md new file mode 100644 index 00000000..35a049e9 --- /dev/null +++ b/Packages/automerge-repo/notes.md @@ -0,0 +1,10 @@ +# using docker-compose + +`docker-compose -f someDockerComposefile up -d`, for example: + +```bash +docker-compose -f docker-compose.yml up -d +``` + +there's an equiv for Tempo, and another for sigNoz +https://github.com/SigNoz/signoz/tree/develop/deploy/docker/clickhouse-setup diff --git a/notes/.gitignore b/Packages/automerge-repo/notes/.gitignore similarity index 83% rename from notes/.gitignore rename to Packages/automerge-repo/notes/.gitignore index e60a0ec4..cc4de8dc 100644 --- a/notes/.gitignore +++ b/Packages/automerge-repo/notes/.gitignore @@ -2,3 +2,4 @@ package.json package-lock.json node_modules *.svg +yarn.lock diff --git a/notes/README.md b/Packages/automerge-repo/notes/README.md similarity index 100% rename from notes/README.md rename to Packages/automerge-repo/notes/README.md diff --git 
a/notes/generate.bash b/Packages/automerge-repo/notes/generate.bash similarity index 61% rename from notes/generate.bash rename to Packages/automerge-repo/notes/generate.bash index 3b006fff..bc3dd1d9 100755 --- a/notes/generate.bash +++ b/Packages/automerge-repo/notes/generate.bash @@ -2,6 +2,15 @@ # set -eou pipefail +# see https://stackoverflow.com/questions/4774054/reliable-way-for-a-bash-script-to-get-the-full-path-to-itself +THIS_SCRIPT_DIR="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" + +pushd $THIS_SCRIPT_DIR + +if [ ! -d node_modules ]; then + yarn install +fi + ./node_modules/.bin/mmdc -i websocket_sync_states.mmd -o websocket_sync_states.svg ./node_modules/.bin/mmdc -i websocket_sync_initial.mmd -o wss_initial.svg @@ -10,4 +19,8 @@ set -eou pipefail ./node_modules/.bin/mmdc -i websocket_sync_closed.mmd -o wss_closed.svg ./node_modules/.bin/mmdc -i websocket_strategy_sync.mmd -o websocket_strategy_sync.svg -./node_modules/.bin/mmdc -i websocket_strategy_request.mmd -o websocket_stragegy_request.svg \ No newline at end of file +./node_modules/.bin/mmdc -i websocket_strategy_request.mmd -o websocket_stragegy_request.svg + +mv *.svg ../Sources/AutomergeRepo/Documentation.docc/Resources/ + +popd diff --git a/notes/websocket_strategy_request.mmd b/Packages/automerge-repo/notes/websocket_strategy_request.mmd similarity index 100% rename from notes/websocket_strategy_request.mmd rename to Packages/automerge-repo/notes/websocket_strategy_request.mmd diff --git a/notes/websocket_strategy_sync.mmd b/Packages/automerge-repo/notes/websocket_strategy_sync.mmd similarity index 100% rename from notes/websocket_strategy_sync.mmd rename to Packages/automerge-repo/notes/websocket_strategy_sync.mmd diff --git a/notes/websocket_sync_closed.mmd b/Packages/automerge-repo/notes/websocket_sync_closed.mmd similarity index 100% rename from notes/websocket_sync_closed.mmd rename to Packages/automerge-repo/notes/websocket_sync_closed.mmd diff --git 
a/notes/websocket_sync_handshake.mmd b/Packages/automerge-repo/notes/websocket_sync_handshake.mmd similarity index 100% rename from notes/websocket_sync_handshake.mmd rename to Packages/automerge-repo/notes/websocket_sync_handshake.mmd diff --git a/notes/websocket_sync_initial.mmd b/Packages/automerge-repo/notes/websocket_sync_initial.mmd similarity index 100% rename from notes/websocket_sync_initial.mmd rename to Packages/automerge-repo/notes/websocket_sync_initial.mmd diff --git a/notes/websocket_sync_peered.mmd b/Packages/automerge-repo/notes/websocket_sync_peered.mmd similarity index 100% rename from notes/websocket_sync_peered.mmd rename to Packages/automerge-repo/notes/websocket_sync_peered.mmd diff --git a/notes/websocket_sync_states.mmd b/Packages/automerge-repo/notes/websocket_sync_states.mmd similarity index 100% rename from notes/websocket_sync_states.mmd rename to Packages/automerge-repo/notes/websocket_sync_states.mmd diff --git a/PrivacyInfo.xcprivacy b/PrivacyInfo.xcprivacy index 8ceb89cc..d461aad9 100644 --- a/PrivacyInfo.xcprivacy +++ b/PrivacyInfo.xcprivacy @@ -12,7 +12,15 @@ NSPrivacyAccessedAPITypes - + + NSPrivacyAccessedAPIType + NSPrivacyAccessedAPICategoryUserDefaults + NSPrivacyAccessedAPITypeReasons + + CA92.1 + 1C8F.1 + +