@article{Kemper2020,
  author    = {Carolin Kemper},
  title     = {Kafkaesque AI? Legal Decision-Making in the Era of Machine Learning},
  journal   = {Intellectual Property and Technology Law Journal},
  volume    = {24},
  number    = {2},
  publisher = {University of San Francisco},
  address   = {San Francisco, CA},
  url       = {https://nbn-resolving.org/urn:nbn:de:0246-opus4-47862},
  pages     = {251--294},
  year      = {2020},
  abstract  = {Artificial Intelligence (“AI”) is already being employed to make critical legal decisions in many countries all over the world. The use of AI in decision-making is a widely debated issue due to allegations of bias, opacity, and lack of accountability. To many, algorithmic decision-making seems obscure, inscrutable, or virtually dystopian. As in Kafka’s The Trial, the decision-makers are anonymous and cannot be challenged in a discursive manner. This article addresses the question of how AI technology can be used for legal decision-making and decision support without appearing Kafkaesque. First, two types of machine learning algorithms are outlined: both Decision Trees and Artificial Neural Networks are commonly used in decision-making software. The real-world use of these technologies is illustrated with a few examples, and three types of use cases are identified, depending on how directly humans are affected by the decision. To establish criteria for evaluating the use of AI in decision-making, machine ethics, the theory of procedural justice, the rule of law, and the principles of due process are consulted. Subsequently, transparency, fairness, accountability, the right to be heard and the right to notice, as well as dignity and respect are discussed. Furthermore, possible safeguards and potential solutions to existing problems are presented. In conclusion, AI rendering decisions on humans does not have to be Kafkaesque. Many solutions and approaches offer possibilities not only to ameliorate the downsides of current AI technologies, but also to enrich and enhance the legal system.},
  language  = {en}
}