ActivityPub Viewer

A small tool to view real-world ActivityPub objects as JSON! Enter a URL or username from Mastodon or a similar service below, and we'll send a request with the right Accept header to the server to view the underlying object.

Open in browser →
{ "@context": [ "https://www.w3.org/ns/activitystreams", { "ostatus": "http://ostatus.org#", "atomUri": "ostatus:atomUri", "inReplyToAtomUri": "ostatus:inReplyToAtomUri", "conversation": "ostatus:conversation", "sensitive": "as:sensitive", "toot": "http://joinmastodon.org/ns#", "votersCount": "toot:votersCount", "litepub": "http://litepub.social/ns#", "directMessage": "litepub:directMessage", "Hashtag": "as:Hashtag" } ], "id": "https://infosec.exchange/users/Markcarter/statuses/114070476604727801", "type": "Note", "summary": null, "inReplyTo": null, "published": "2025-02-26T13:42:20Z", "url": "https://infosec.exchange/@Markcarter/114070476604727801", "attributedTo": "https://infosec.exchange/users/Markcarter", "to": [ "https://www.w3.org/ns/activitystreams#Public" ], "cc": [ "https://infosec.exchange/users/Markcarter/followers" ], "sensitive": false, "atomUri": "https://infosec.exchange/users/Markcarter/statuses/114070476604727801", "inReplyToAtomUri": null, "conversation": "tag:infosec.exchange,2025-02-26:objectId=245639417:objectType=Conversation", "content": "<p>57% of enterprise employees admit to entering high-risk information into publicly available generative AI assistants, exposing critical security gaps in enterprise AI usage 🛡️ )--Nearly seven out of 10 (68%) enterprise employees who use generative AI (GenAI) at work say they access publicly available GenAI assistants such as ChatGPT, Microsoft Copilot or Google Gemini through personal accounts, and more than half (57%) have admitted to entering sensitive information into them.<br />Surveyed employees admitted to entering the following types of information into publicly available GenAI assistants:</p><p>Personal data, such as names, addresses, emails and phone numbers (31%).<br />Product or project details, including unreleased details and prototypes (29%).<br />Customer information, including names, contact details, order history, chat logs, emails, or recorded calls (21%).<br />Confidential company financial information, such as revenue, profit margins, budgets, or forecasts (11%).<br />This happens despite nearly a third (29%) of employees acknowledging their companies have policies in place that prohibit them from inputting company, client or other sensitive information into GenAI assistants.</p><p>Regardless of the risks, many employees in the survey indicated that their company is falling short on providing them with information and training to use GenAI safely:</p><p>Only 24% of employees said their company requires mandatory AI assistant training.<br />44% said their company does not have AI guidelines or policies in place, or they don’t know if their company does.<br />50% said they are not sure if they&#39;re adhering to their company’s AI guidelines.<br />42% said there are no repercussions for not following their company’s AI guidelines.</p><p><a href=\"https://www.businesswire.com/news/home/20250226490609/en/TELUS-Digital-Survey-Reveals-Enterprise-Employees-Are-Entering-Sensitive-Data-Into-AI-Assistants-More-Than-You-Think\" target=\"_blank\" rel=\"nofollow noopener\" translate=\"no\"><span class=\"invisible\">https://www.</span><span class=\"ellipsis\">businesswire.com/news/home/202</span><span class=\"invisible\">50226490609/en/TELUS-Digital-Survey-Reveals-Enterprise-Employees-Are-Entering-Sensitive-Data-Into-AI-Assistants-More-Than-You-Think</span></a> <a href=\"https://infosec.exchange/tags/Infosec\" class=\"mention hashtag\" rel=\"tag\">#<span>Infosec</span></a> <a href=\"https://infosec.exchange/tags/Legal\" class=\"mention hashtag\" rel=\"tag\">#<span>Legal</span></a></p>", "contentMap": { "en": "<p>57% of enterprise employees admit to entering high-risk information into publicly available generative AI assistants, exposing critical security gaps in enterprise AI usage 🛡️ )--Nearly seven out of 10 (68%) enterprise employees who use generative AI (GenAI) at work say they access publicly available GenAI assistants such as ChatGPT, Microsoft Copilot or Google Gemini through personal accounts, and more than half (57%) have admitted to entering sensitive information into them.<br />Surveyed employees admitted to entering the following types of information into publicly available GenAI assistants:</p><p>Personal data, such as names, addresses, emails and phone numbers (31%).<br />Product or project details, including unreleased details and prototypes (29%).<br />Customer information, including names, contact details, order history, chat logs, emails, or recorded calls (21%).<br />Confidential company financial information, such as revenue, profit margins, budgets, or forecasts (11%).<br />This happens despite nearly a third (29%) of employees acknowledging their companies have policies in place that prohibit them from inputting company, client or other sensitive information into GenAI assistants.</p><p>Regardless of the risks, many employees in the survey indicated that their company is falling short on providing them with information and training to use GenAI safely:</p><p>Only 24% of employees said their company requires mandatory AI assistant training.<br />44% said their company does not have AI guidelines or policies in place, or they don’t know if their company does.<br />50% said they are not sure if they&#39;re adhering to their company’s AI guidelines.<br />42% said there are no repercussions for not following their company’s AI guidelines.</p><p><a href=\"https://www.businesswire.com/news/home/20250226490609/en/TELUS-Digital-Survey-Reveals-Enterprise-Employees-Are-Entering-Sensitive-Data-Into-AI-Assistants-More-Than-You-Think\" target=\"_blank\" rel=\"nofollow noopener\" translate=\"no\"><span class=\"invisible\">https://www.</span><span class=\"ellipsis\">businesswire.com/news/home/202</span><span class=\"invisible\">50226490609/en/TELUS-Digital-Survey-Reveals-Enterprise-Employees-Are-Entering-Sensitive-Data-Into-AI-Assistants-More-Than-You-Think</span></a> <a href=\"https://infosec.exchange/tags/Infosec\" class=\"mention hashtag\" rel=\"tag\">#<span>Infosec</span></a> <a href=\"https://infosec.exchange/tags/Legal\" class=\"mention hashtag\" rel=\"tag\">#<span>Legal</span></a></p>" }, "attachment": [], "tag": [ { "type": "Hashtag", "href": "https://infosec.exchange/tags/infosec", "name": "#infosec" }, { "type": "Hashtag", "href": "https://infosec.exchange/tags/legal", "name": "#legal" } ], "replies": { "id": "https://infosec.exchange/users/Markcarter/statuses/114070476604727801/replies", "type": "Collection", "first": { "type": "CollectionPage", "next": "https://infosec.exchange/users/Markcarter/statuses/114070476604727801/replies?only_other_accounts=true&page=true", "partOf": "https://infosec.exchange/users/Markcarter/statuses/114070476604727801/replies", "items": [] } }, "likes": { "id": "https://infosec.exchange/users/Markcarter/statuses/114070476604727801/likes", "type": "Collection", "totalItems": 2 }, "shares": { "id": "https://infosec.exchange/users/Markcarter/statuses/114070476604727801/shares", "type": "Collection", "totalItems": 0 } }