ActivityPub Viewer

A small tool to view real-world ActivityPub objects as JSON! Enter a URL or username from Mastodon or a similar service below, and we'll send a request with the right Accept header (application/activity+json) to the server and show you the underlying object.
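
Roughly speaking, the viewer does two things: it resolves a username to an actor URL via WebFinger, and it fetches any object URL with the ActivityPub Accept header. Below is a sketch in Python using the requests library; the handle is just the account from the example object further down, and some servers may additionally require the fuller Accept value 'application/ld+json; profile="https://www.w3.org/ns/activitystreams"' or signed (authorized) fetches.

# Rough sketch only: resolve a handle via WebFinger, then fetch objects as JSON.
import requests

ACTIVITY_JSON = "application/activity+json"

def resolve_username(handle: str) -> str:
    """Resolve user@domain to an actor URL via WebFinger (RFC 7033)."""
    user, domain = handle.lstrip("@").split("@", 1)
    r = requests.get(
        f"https://{domain}/.well-known/webfinger",
        params={"resource": f"acct:{user}@{domain}"},
        timeout=10,
    )
    r.raise_for_status()
    # The "self" link with the ActivityPub media type points at the actor object.
    for link in r.json().get("links", []):
        if link.get("rel") == "self" and link.get("type") == ACTIVITY_JSON:
            return link["href"]
    raise ValueError(f"no ActivityPub actor found for {handle}")

def fetch_object(url: str) -> dict:
    """Fetch any ActivityPub object by asking for its JSON representation."""
    r = requests.get(url, headers={"Accept": ACTIVITY_JSON}, timeout=10)
    r.raise_for_status()
    return r.json()

# Example usage (handle taken from the object shown below):
actor = fetch_object(resolve_username("remixtures@tldr.nettime.org"))
print(actor.get("type"), actor.get("id"))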

For example, here is the Create activity behind a post from tldr.nettime.org (viewable in a browser at https://tldr.nettime.org/@remixtures/113532702285203284):
{ "@context": [ "https://www.w3.org/ns/activitystreams", { "ostatus": "http://ostatus.org#", "atomUri": "ostatus:atomUri", "inReplyToAtomUri": "ostatus:inReplyToAtomUri", "conversation": "ostatus:conversation", "sensitive": "as:sensitive", "toot": "http://joinmastodon.org/ns#", "votersCount": "toot:votersCount", "Hashtag": "as:Hashtag" } ], "id": "https://tldr.nettime.org/users/remixtures/statuses/113532702285203284/activity", "type": "Create", "actor": "https://tldr.nettime.org/users/remixtures", "published": "2024-11-23T14:19:15Z", "to": [ "https://www.w3.org/ns/activitystreams#Public" ], "cc": [ "https://tldr.nettime.org/users/remixtures/followers" ], "object": { "id": "https://tldr.nettime.org/users/remixtures/statuses/113532702285203284", "type": "Note", "summary": null, "inReplyTo": null, "published": "2024-11-23T14:19:15Z", "url": "https://tldr.nettime.org/@remixtures/113532702285203284", "attributedTo": "https://tldr.nettime.org/users/remixtures", "to": [ "https://www.w3.org/ns/activitystreams#Public" ], "cc": [ "https://tldr.nettime.org/users/remixtures/followers" ], "sensitive": false, "atomUri": "https://tldr.nettime.org/users/remixtures/statuses/113532702285203284", "inReplyToAtomUri": null, "conversation": "tag:tldr.nettime.org,2024-11-23:objectId=21969127:objectType=Conversation", "localOnly": false, "content": "<p>&quot;OpenAI is once again lifting the lid (just a crack) on its safety-testing processes. Last month the company shared the results of an investigation that looked at how often ChatGPT produced a harmful gender or racial stereotype based on a user’s name. Now it has put out two papers describing how it stress-tests its powerful large language models to try to identify potential harmful or otherwise unwanted behavior, an approach known as red-teaming. </p><p>Large language models are now being used by millions of people for many different things. But as OpenAI itself points out, these models are known to produce racist, misogynistic and hateful content; reveal private information; amplify biases and stereotypes; and make stuff up. The company wants to share what it is doing to minimize such behaviors.</p><p>MIT Technology Review got an exclusive preview of the work. The first paper describes how OpenAI directs an extensive network of human testers outside the company to vet the behavior of its models before they are released. 
The second paper presents a new way to automate parts of the testing process, using a large language model like GPT-4 to come up with novel ways to bypass its own guardrails.&quot;</p><p><a href=\"https://www.technologyreview.com/2024/11/21/1107158/how-openai-stress-tests-its-large-language-models/?utm_source=the_download&amp;utm_medium=email&amp;utm_campaign=the_download.unpaid.engagement&amp;utm_term=Active%20Qualified&amp;utm_content=11-22-2024&amp;mc_cid=f4a6507b8a\" target=\"_blank\" rel=\"nofollow noopener noreferrer\" translate=\"no\"><span class=\"invisible\">https://www.</span><span class=\"ellipsis\">technologyreview.com/2024/11/2</span><span class=\"invisible\">1/1107158/how-openai-stress-tests-its-large-language-models/?utm_source=the_download&amp;utm_medium=email&amp;utm_campaign=the_download.unpaid.engagement&amp;utm_term=Active%20Qualified&amp;utm_content=11-22-2024&amp;mc_cid=f4a6507b8a</span></a></p><p><a href=\"https://tldr.nettime.org/tags/AI\" class=\"mention hashtag\" rel=\"tag\">#<span>AI</span></a> <a href=\"https://tldr.nettime.org/tags/GenerativeAI\" class=\"mention hashtag\" rel=\"tag\">#<span>GenerativeAI</span></a> <a href=\"https://tldr.nettime.org/tags/OpenAI\" class=\"mention hashtag\" rel=\"tag\">#<span>OpenAI</span></a> <a href=\"https://tldr.nettime.org/tags/ChatGPT\" class=\"mention hashtag\" rel=\"tag\">#<span>ChatGPT</span></a> <a href=\"https://tldr.nettime.org/tags/LLMs\" class=\"mention hashtag\" rel=\"tag\">#<span>LLMs</span></a> <a href=\"https://tldr.nettime.org/tags/AITraining\" class=\"mention hashtag\" rel=\"tag\">#<span>AITraining</span></a></p>", "contentMap": { "pt": "<p>&quot;OpenAI is once again lifting the lid (just a crack) on its safety-testing processes. Last month the company shared the results of an investigation that looked at how often ChatGPT produced a harmful gender or racial stereotype based on a user’s name. Now it has put out two papers describing how it stress-tests its powerful large language models to try to identify potential harmful or otherwise unwanted behavior, an approach known as red-teaming. </p><p>Large language models are now being used by millions of people for many different things. But as OpenAI itself points out, these models are known to produce racist, misogynistic and hateful content; reveal private information; amplify biases and stereotypes; and make stuff up. The company wants to share what it is doing to minimize such behaviors.</p><p>MIT Technology Review got an exclusive preview of the work. The first paper describes how OpenAI directs an extensive network of human testers outside the company to vet the behavior of its models before they are released. 
The second paper presents a new way to automate parts of the testing process, using a large language model like GPT-4 to come up with novel ways to bypass its own guardrails.&quot;</p><p><a href=\"https://www.technologyreview.com/2024/11/21/1107158/how-openai-stress-tests-its-large-language-models/?utm_source=the_download&amp;utm_medium=email&amp;utm_campaign=the_download.unpaid.engagement&amp;utm_term=Active%20Qualified&amp;utm_content=11-22-2024&amp;mc_cid=f4a6507b8a\" target=\"_blank\" rel=\"nofollow noopener noreferrer\" translate=\"no\"><span class=\"invisible\">https://www.</span><span class=\"ellipsis\">technologyreview.com/2024/11/2</span><span class=\"invisible\">1/1107158/how-openai-stress-tests-its-large-language-models/?utm_source=the_download&amp;utm_medium=email&amp;utm_campaign=the_download.unpaid.engagement&amp;utm_term=Active%20Qualified&amp;utm_content=11-22-2024&amp;mc_cid=f4a6507b8a</span></a></p><p><a href=\"https://tldr.nettime.org/tags/AI\" class=\"mention hashtag\" rel=\"tag\">#<span>AI</span></a> <a href=\"https://tldr.nettime.org/tags/GenerativeAI\" class=\"mention hashtag\" rel=\"tag\">#<span>GenerativeAI</span></a> <a href=\"https://tldr.nettime.org/tags/OpenAI\" class=\"mention hashtag\" rel=\"tag\">#<span>OpenAI</span></a> <a href=\"https://tldr.nettime.org/tags/ChatGPT\" class=\"mention hashtag\" rel=\"tag\">#<span>ChatGPT</span></a> <a href=\"https://tldr.nettime.org/tags/LLMs\" class=\"mention hashtag\" rel=\"tag\">#<span>LLMs</span></a> <a href=\"https://tldr.nettime.org/tags/AITraining\" class=\"mention hashtag\" rel=\"tag\">#<span>AITraining</span></a></p>" }, "attachment": [], "tag": [ { "type": "Hashtag", "href": "https://tldr.nettime.org/tags/ai", "name": "#ai" }, { "type": "Hashtag", "href": "https://tldr.nettime.org/tags/generativeAI", "name": "#generativeAI" }, { "type": "Hashtag", "href": "https://tldr.nettime.org/tags/openai", "name": "#openai" }, { "type": "Hashtag", "href": "https://tldr.nettime.org/tags/chatgpt", "name": "#chatgpt" }, { "type": "Hashtag", "href": "https://tldr.nettime.org/tags/LLMs", "name": "#LLMs" }, { "type": "Hashtag", "href": "https://tldr.nettime.org/tags/aitraining", "name": "#aitraining" } ], "replies": { "id": "https://tldr.nettime.org/users/remixtures/statuses/113532702285203284/replies", "type": "Collection", "first": { "type": "CollectionPage", "next": "https://tldr.nettime.org/users/remixtures/statuses/113532702285203284/replies?only_other_accounts=true&page=true", "partOf": "https://tldr.nettime.org/users/remixtures/statuses/113532702285203284/replies", "items": [] } } } }
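
The replies field above is an ActivityStreams Collection: its first page is inlined (with no items yet), and further pages are reachable through the next URL. A rough sketch of walking such a collection, reusing fetch_object from the earlier sketch, with field names taken from the example object:

# Rough sketch only: gather items from a Note's replies Collection, page by page.
def collect_replies(activity: dict, max_pages: int = 10) -> list:
    """Follow Collection -> CollectionPage -> next links and gather items."""
    page = activity["object"]["replies"].get("first")
    items = []
    for _ in range(max_pages):
        if not page:
            break
        if isinstance(page, str):
            # Pages may be referenced by URL instead of being inlined.
            page = fetch_object(page)
        items.extend(page.get("items") or page.get("orderedItems") or [])
        page = page.get("next")
    return items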