<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="en-GB">
	<id>https://www.vigyanwiki.in/index.php?action=history&amp;feed=atom&amp;title=Template%3AExistential_risk_from_artificial_intelligence</id>
	<title>Template:Existential risk from artificial intelligence - Revision history</title>
	<link rel="self" type="application/atom+xml" href="https://www.vigyanwiki.in/index.php?action=history&amp;feed=atom&amp;title=Template%3AExistential_risk_from_artificial_intelligence"/>
	<link rel="alternate" type="text/html" href="https://www.vigyanwiki.in/index.php?title=Template:Existential_risk_from_artificial_intelligence&amp;action=history"/>
	<updated>2026-05-06T15:04:16Z</updated>
	<subtitle>Revision history for this page on the wiki</subtitle>
	<generator>MediaWiki 1.39.3</generator>
	<entry>
		<id>https://www.vigyanwiki.in/index.php?title=Template:Existential_risk_from_artificial_intelligence&amp;diff=161334&amp;oldid=prev</id>
		<title>Indicwiki: 1 revision imported from :alpha:Template:Existential_risk_from_artificial_intelligence</title>
		<link rel="alternate" type="text/html" href="https://www.vigyanwiki.in/index.php?title=Template:Existential_risk_from_artificial_intelligence&amp;diff=161334&amp;oldid=prev"/>
		<updated>2023-05-17T11:02:42Z</updated>

		<summary type="html">&lt;p&gt;1 revision imported from &lt;a href=&quot;https://alpha.indicwiki.in/index.php?title=Template:Existential_risk_from_artificial_intelligence&quot; class=&quot;extiw&quot; title=&quot;alpha:Template:Existential risk from artificial intelligence&quot;&gt;alpha:Template:Existential_risk_from_artificial_intelligence&lt;/a&gt;&lt;/p&gt;
&lt;table style=&quot;background-color: #fff; color: #202122;&quot; data-mw=&quot;interface&quot;&gt;
				&lt;tr class=&quot;diff-title&quot; lang=&quot;en-GB&quot;&gt;
				&lt;td colspan=&quot;1&quot; style=&quot;background-color: #fff; color: #202122; text-align: center;&quot;&gt;← Older revision&lt;/td&gt;
				&lt;td colspan=&quot;1&quot; style=&quot;background-color: #fff; color: #202122; text-align: center;&quot;&gt;Revision as of 16:32, 17 May 2023&lt;/td&gt;
				&lt;/tr&gt;&lt;tr&gt;&lt;td colspan=&quot;2&quot; class=&quot;diff-notice&quot; lang=&quot;en-GB&quot;&gt;&lt;div class=&quot;mw-diff-empty&quot;&gt;(No difference)&lt;/div&gt;
&lt;/td&gt;&lt;/tr&gt;&lt;/table&gt;</summary>
		<author><name>Indicwiki</name></author>
	</entry>
	<entry>
		<id>https://www.vigyanwiki.in/index.php?title=Template:Existential_risk_from_artificial_intelligence&amp;diff=161333&amp;oldid=prev</id>
		<title>alpha&gt;Indicwiki: Created page with &quot;{{navbox | name = Existential risk from artificial intelligence | title = Existential risk from artificial intellig...&quot;</title>
		<link rel="alternate" type="text/html" href="https://www.vigyanwiki.in/index.php?title=Template:Existential_risk_from_artificial_intelligence&amp;diff=161333&amp;oldid=prev"/>
		<updated>2023-04-18T07:35:20Z</updated>

		<summary type="html">&lt;p&gt;Created page with &amp;quot;{{navbox | name = Existential risk from artificial intelligence | title = &lt;a href=&quot;/index.php?title=Existential_risk_from_artificial_general_intelligence&amp;amp;action=edit&amp;amp;redlink=1&quot; class=&quot;new&quot; title=&quot;Existential risk from artificial general intelligence (page does not exist)&quot;&gt;Existential risk&lt;/a&gt; from artificial intellig...&amp;quot;&lt;/p&gt;
&lt;p&gt;&lt;b&gt;New page&lt;/b&gt;&lt;/p&gt;&lt;div&gt;{{navbox&lt;br /&gt;
| name = Existential risk from artificial intelligence&lt;br /&gt;
| title = [[Existential risk from artificial general intelligence|Existential risk]] from [[artificial intelligence]]&lt;br /&gt;
| state = {{{state|{{{1|autocollapse}}}}}}&lt;br /&gt;
| listclass = hlist&lt;br /&gt;
&lt;br /&gt;
| group1 = Concepts&lt;br /&gt;
| list1 =&lt;br /&gt;
* [[AI alignment]]&lt;br /&gt;
* [[AI capability control]]&lt;br /&gt;
* [[AI safety]]&lt;br /&gt;
* [[AI takeover]]&lt;br /&gt;
* [[Accelerating change]]&lt;br /&gt;
* [[Existential risk from artificial general intelligence]]&lt;br /&gt;
* [[Friendly artificial intelligence]]&lt;br /&gt;
* [[Instrumental convergence]]&lt;br /&gt;
* [[Intelligence explosion]]&lt;br /&gt;
* [[Machine ethics]]&lt;br /&gt;
* [[Superintelligence]]&lt;br /&gt;
* [[Technological singularity]]&lt;br /&gt;
&lt;br /&gt;
| group2 = Organizations&lt;br /&gt;
| list2 =&lt;br /&gt;
* [[Allen Institute for AI]]&lt;br /&gt;
* [[Alignment Research Center]]&lt;br /&gt;
* [[Center for Applied Rationality]]&lt;br /&gt;
* [[Center for Human-Compatible Artificial Intelligence]]&lt;br /&gt;
* [[Centre for the Study of Existential Risk]]&lt;br /&gt;
* [[DeepMind]]&lt;br /&gt;
* [[Foundational Questions Institute]]&lt;br /&gt;
* [[Future of Humanity Institute]]&lt;br /&gt;
* [[Future of Life Institute]]&lt;br /&gt;
* [[Humanity+]]&lt;br /&gt;
* [[Institute for Ethics and Emerging Technologies]]&lt;br /&gt;
* [[Leverhulme Centre for the Future of Intelligence]]&lt;br /&gt;
* [[Machine Intelligence Research Institute]]&lt;br /&gt;
* [[OpenAI]]&lt;br /&gt;
&lt;br /&gt;
| group3 = People&lt;br /&gt;
| list3 =&lt;br /&gt;
* [[Slate Star Codex|Scott Alexander]]&lt;br /&gt;
* [[Nick Bostrom]]&lt;br /&gt;
* [[K. Eric Drexler|Eric Drexler]]&lt;br /&gt;
* [[Sam Harris]]&lt;br /&gt;
* [[Stephen Hawking]]&lt;br /&gt;
* [[Bill Hibbard]]&lt;br /&gt;
* [[Bill Joy]]&lt;br /&gt;
* [[Elon Musk]]&lt;br /&gt;
* [[Steve Omohundro]]&lt;br /&gt;
* [[Huw Price]]&lt;br /&gt;
* [[Martin Rees]]&lt;br /&gt;
* [[Stuart J. Russell]]&lt;br /&gt;
* [[Jaan Tallinn]]&lt;br /&gt;
* [[Max Tegmark]]&lt;br /&gt;
* [[Frank Wilczek]]&lt;br /&gt;
* [[Roman Yampolskiy]]&lt;br /&gt;
* [[Andrew Yang]]&lt;br /&gt;
* [[Eliezer Yudkowsky]]&lt;br /&gt;
&lt;br /&gt;
| group4 = Other&lt;br /&gt;
| list4 = &lt;br /&gt;
* [[Global catastrophic risk#Artificial intelligence|Artificial intelligence as a global catastrophic risk]]&lt;br /&gt;
* [[Artificial general intelligence#Controversies and dangers|Controversies and dangers of artificial general intelligence]]&lt;br /&gt;
* [[Ethics of artificial intelligence]]&lt;br /&gt;
* [[Suffering risks]]&lt;br /&gt;
* ''[[Human Compatible]]''&lt;br /&gt;
* [[Open Letter on Artificial Intelligence]]&lt;br /&gt;
* ''[[Our Final Invention]]''&lt;br /&gt;
* ''[[The Precipice: Existential Risk and the Future of Humanity|The Precipice]]''&lt;br /&gt;
* ''[[Superintelligence: Paths, Dangers, Strategies]]''&lt;br /&gt;
* ''[[Do You Trust This Computer?]]''&lt;br /&gt;
* [[Artificial Intelligence Act]]&lt;br /&gt;
| below = &lt;br /&gt;
{{icon|category}} [[:Category:Existential risk from artificial general intelligence|Category]]&lt;br /&gt;
}}&amp;lt;noinclude&amp;gt;&lt;br /&gt;
{{documentation|content=&lt;br /&gt;
{{collapsible option}}&lt;br /&gt;
[[Category: Global risk navigational boxes]]&lt;br /&gt;
[[Category:Technology and applied science navigational boxes]]&lt;br /&gt;
}}&amp;lt;/noinclude&amp;gt;&lt;/div&gt;</summary>
		<author><name>alpha&gt;Indicwiki</name></author>
	</entry>
</feed>