{
  localUrl: '../page/918.html',
  arbitalUrl: 'https://arbital.com/p/918',
  rawJsonUrl: '../raw/918.json',
  likeableId: '0',
  likeableType: 'page',
  myLikeValue: '0',
  likeCount: '0',
  dislikeCount: '0',
  likeScore: '0',
  individualLikes: [],
  pageId: '918',
  edit: '4',
  editSummary: '',
  prevEdit: '3',
  currentEdit: '4',
  wasPublished: 'true',
  type: 'comment',
  title: '"Eliezer goes back and forth between "sapient" a..."',
  clickbait: '',
  textLength: '1175',
  alias: '918',
  externalUrl: '',
  sortChildrenBy: 'recentFirst',
  hasVote: 'false',
  voteType: '',
  votesAnonymous: 'false',
  editCreatorId: 'PhilGoetz',
  editCreatedAt: '2018-03-13 05:47:11',
  pageCreatorId: 'PhilGoetz',
  pageCreatedAt: '2018-03-13 05:35:32',
  seeDomainId: '0',
  editDomainId: '3048',
  submitToDomainId: '0',
  isAutosave: 'false',
  isSnapshot: 'false',
  isLiveEdit: 'true',
  isMinorEdit: 'false',
  indirectTeacher: 'false',
  todoCount: '0',
  isEditorComment: 'false',
  isApprovedComment: 'false',
  isResolved: 'false',
  snapshotText: '',
  anchorContext: '',
  anchorText: '',
  anchorOffset: '0',
  mergedInto: '',
  isDeleted: 'false',
  viewCount: '690',
  text: 'Eliezer goes back and forth between "sapient" and "sentient", which are not synonyms.  Neither is obviously a justification for claiming moral status as an agent.\n\nIt is important either to state clearly *what* one presumes gives an agent moral status (and hence what constitutes mindcrime), or to change each occurrence of "sapient", "sentient", or "personhood" so they all use the same word.  I recommend stating the general case using personhood(X), a function to be supplied by the user and not defined here.  Addressing the problem depends critically on what that function is--but the statement of the general case shouldn't be bound up with the choice of personhood predicate.\n\nChoosing either "sapient" or "sentient" is problematic: "sentient" because it includes at least all mammals, and "sapient" because it really just means "intelligent", and the AI is going to be equally intelligent (defined as problem-solving or optimizing ability) whether it simulates *humans* or not.  If intelligence grants moral standing (as it seems to here), and mindcrime means trapping an agent with moral standing in the AI's world, then the construction of any AI is inherently mindcrime.',
  metaText: '',
  isTextLoaded: 'true',
  isSubscribedToDiscussion: 'false',
  isSubscribedToUser: 'false',
  isSubscribedAsMaintainer: 'false',
  discussionSubscriberCount: '1',
  maintainerCount: '1',
  userSubscriberCount: '0',
  lastVisit: '',
  hasDraft: 'false',
  votes: [],
  voteSummary: 'null',
  muVoteSummary: '0',
  voteScaling: '0',
  currentUserVote: '-2',
  voteCount: '0',
  lockedVoteType: '',
  maxEditEver: '0',
  redLinkCount: '0',
  lockedBy: '',
  lockedUntil: '',
  nextPageId: '',
  prevPageId: '',
  usedAsMastery: 'false',
  proposalEditNum: '0',
  permissions: {
    edit: {
      has: 'false',
      reason: 'You don't have domain permission to edit this page'
    },
    proposeEdit: {
      has: 'true',
      reason: ''
    },
    delete: {
      has: 'false',
      reason: 'You don't have domain permission to delete this page'
    },
    comment: {
      has: 'false',
      reason: 'You can't comment in this domain because you are not a member'
    },
    proposeComment: {
      has: 'true',
      reason: ''
    }
  },
  summaries: {},
  creatorIds: [
    'PhilGoetz'
  ],
  childIds: [],
  parentIds: [
    'mindcrime'
  ],
  commentIds: [],
  questionIds: [],
  tagIds: [],
  relatedIds: [],
  markIds: [],
  explanations: [],
  learnMore: [],
  requirements: [],
  subjects: [],
  lenses: [],
  lensParentId: '',
  pathPages: [],
  learnMoreTaughtMap: {},
  learnMoreCoveredMap: {},
  learnMoreRequiredMap: {},
  editHistory: {},
  domainSubmissions: {},
  answers: [],
  answerCount: '0',
  commentCount: '0',
  newCommentCount: '0',
  linkedMarkCount: '0',
  changeLogs: [
    {
      likeableId: '0',
      likeableType: 'changeLog',
      myLikeValue: '0',
      likeCount: '0',
      dislikeCount: '0',
      likeScore: '0',
      individualLikes: [],
      id: '22989',
      pageId: '918',
      userId: 'PhilGoetz',
      edit: '4',
      type: 'newEdit',
      createdAt: '2018-03-13 05:47:11',
      auxPageId: '',
      oldSettingsValue: '',
      newSettingsValue: ''
    },
    {
      likeableId: '0',
      likeableType: 'changeLog',
      myLikeValue: '0',
      likeCount: '0',
      dislikeCount: '0',
      likeScore: '0',
      individualLikes: [],
      id: '22988',
      pageId: '918',
      userId: 'PhilGoetz',
      edit: '3',
      type: 'newEdit',
      createdAt: '2018-03-13 05:44:51',
      auxPageId: '',
      oldSettingsValue: '',
      newSettingsValue: ''
    },
    {
      likeableId: '0',
      likeableType: 'changeLog',
      myLikeValue: '0',
      likeCount: '0',
      dislikeCount: '0',
      likeScore: '0',
      individualLikes: [],
      id: '22987',
      pageId: '918',
      userId: 'PhilGoetz',
      edit: '2',
      type: 'newEdit',
      createdAt: '2018-03-13 05:41:02',
      auxPageId: '',
      oldSettingsValue: '',
      newSettingsValue: ''
    },
    {
      likeableId: '0',
      likeableType: 'changeLog',
      myLikeValue: '0',
      likeCount: '0',
      dislikeCount: '0',
      likeScore: '0',
      individualLikes: [],
      id: '22986',
      pageId: '918',
      userId: 'PhilGoetz',
      edit: '1',
      type: 'newEdit',
      createdAt: '2018-03-13 05:35:32',
      auxPageId: '',
      oldSettingsValue: '',
      newSettingsValue: ''
    }
  ],
  feedSubmissions: [],
  searchStrings: {},
  hasChildren: 'false',
  hasParents: 'true',
  redAliases: {},
  improvementTagIds: [],
  nonMetaTagIds: [],
  todos: [],
  slowDownMap: 'null',
  speedUpMap: 'null',
  arcPageIds: 'null',
  contentRequests: {}
}