{ localUrl: '../page/38p.html', arbitalUrl: 'https://arbital.com/p/38p', rawJsonUrl: '../raw/38p.json', likeableId: '2197', likeableType: 'page', myLikeValue: '0', likeCount: '0', dislikeCount: '0', likeScore: '0', individualLikes: [], pageId: '38p', edit: '2', editSummary: '', prevEdit: '1', currentEdit: '2', wasPublished: 'true', type: 'comment', title: '"> The key property we want ..."', clickbait: '', textLength: '2067', alias: '38p', externalUrl: '', sortChildrenBy: 'recentFirst', hasVote: 'false', voteType: '', votesAnonymous: 'false', editCreatorId: 'EliezerYudkowsky', editCreatedAt: '2016-04-17 20:32:03', pageCreatorId: 'EliezerYudkowsky', pageCreatedAt: '2016-04-17 20:31:07', seeDomainId: '0', editDomainId: 'EliezerYudkowsky', submitToDomainId: '0', isAutosave: 'false', isSnapshot: 'false', isLiveEdit: 'true', isMinorEdit: 'false', indirectTeacher: 'false', todoCount: '0', isEditorComment: 'false', isApprovedComment: 'true', isResolved: 'false', snapshotText: '', anchorContext: '', anchorText: '', anchorOffset: '0', mergedInto: '', isDeleted: 'false', viewCount: '167', text: '> The key property we want from the distinguisher is that it can learn to detect relevant differences between the model and the real system. This seems like it might be the kind of problem that I would classify as "probably easy if the agent is powerful and the difference is really important" and you would classify as "way too hard to count on."\n\n*Counting on* things before you've found a solution to them isn't very [1cv mindset], but I do consider this a promising approach. Definitely, the generative-adversarial approach in modern neural networks causes me to hope that this is the sort of thing that actually works in practice. So I might not be as pessimistic as you think? I still think in general that one does not go about taking things for granted, but the notion of faithful simulation seems like one that could prove to have a tractable core after hammering on it for a bit, and it also seems very possible that if you're reasonably smart and you can't detect any expected differences in the behavior of neural columns then the corresponding human simulation is faithful.\n\nMy current thoughts on possible failure modes:\n\n1. "No differences you know about" might mix up the map and the territory in some obscurely fatal way that leads to the equivalent of the AI deliberately managing to 'not know' about inconvenient divergences.\n2. If we use a limited AI and don't let it run thousands of simulations of people that it can compare to thousands of brains in vats, then in practice its column-level tests won't detect cumulative neural-level differences that lead to an 80% probability of schizophrenia.\n3. The adversarial approach as written won't work because it will turn out that it's *always* possible for an equally smart adversary to tell the difference, especially for simulations that can be computed at a worthwhile speedup. Which means this test won't meaningfully discriminate in the region of intuitively faithful vs. nonfaithful simulations. 
(This strikes me as the sort of issue that's repairable, but perhaps not trivially so.)', metaText: '', isTextLoaded: 'true', isSubscribedToDiscussion: 'false', isSubscribedToUser: 'false', isSubscribedAsMaintainer: 'false', discussionSubscriberCount: '0', maintainerCount: '0', userSubscriberCount: '0', lastVisit: '', hasDraft: 'false', votes: [], voteSummary: 'null', muVoteSummary: '0', voteScaling: '0', currentUserVote: '-2', voteCount: '0', lockedVoteType: '', maxEditEver: '0', redLinkCount: '0', lockedBy: '', lockedUntil: '', nextPageId: '', prevPageId: '', usedAsMastery: 'false', proposalEditNum: '0', permissions: { edit: { has: 'false', reason: 'You don't have domain permission to edit this page' }, proposeEdit: { has: 'true', reason: '' }, delete: { has: 'false', reason: 'You don't have domain permission to delete this page' }, comment: { has: 'false', reason: 'You can't comment in this domain because you are not a member' }, proposeComment: { has: 'true', reason: '' } }, summaries: {}, creatorIds: [ 'EliezerYudkowsky' ], childIds: [], parentIds: [ 'faithful_simulation', '37b' ], commentIds: [], questionIds: [], tagIds: [], relatedIds: [], markIds: [], explanations: [], learnMore: [], requirements: [], subjects: [], lenses: [], lensParentId: '', pathPages: [], learnMoreTaughtMap: {}, learnMoreCoveredMap: {}, learnMoreRequiredMap: {}, editHistory: {}, domainSubmissions: {}, answers: [], answerCount: '0', commentCount: '0', newCommentCount: '0', linkedMarkCount: '0', changeLogs: [ { likeableId: '0', likeableType: 'changeLog', myLikeValue: '0', likeCount: '0', dislikeCount: '0', likeScore: '0', individualLikes: [], id: '9321', pageId: '38p', userId: 'EliezerYudkowsky', edit: '2', type: 'newEdit', createdAt: '2016-04-17 20:32:03', auxPageId: '', oldSettingsValue: '', newSettingsValue: '' }, { likeableId: '0', likeableType: 'changeLog', myLikeValue: '0', likeCount: '0', dislikeCount: '0', likeScore: '0', individualLikes: [], id: '9320', pageId: '38p', userId: 'EliezerYudkowsky', edit: '1', type: 'newEdit', createdAt: '2016-04-17 20:31:07', auxPageId: '', oldSettingsValue: '', newSettingsValue: '' } ], feedSubmissions: [], searchStrings: {}, hasChildren: 'false', hasParents: 'true', redAliases: {}, improvementTagIds: [], nonMetaTagIds: [], todos: [], slowDownMap: 'null', speedUpMap: 'null', arcPageIds: 'null', contentRequests: {} }